Posted to commits@cassandra.apache.org by aw...@apache.org on 2018/01/29 21:10:04 UTC

[01/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Repository: cassandra-dtest
Updated Branches:
  refs/heads/master f4eda3a50 -> 49b2dda4e


http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/user_types_test.py
----------------------------------------------------------------------
diff --git a/user_types_test.py b/user_types_test.py
index 7ca06dc..b172a47 100644
--- a/user_types_test.py
+++ b/user_types_test.py
@@ -1,12 +1,19 @@
 import time
 import uuid
+import pytest
+import logging
+
+from flaky import flaky
 
 from cassandra import ConsistencyLevel, Unauthorized
 from cassandra.query import SimpleStatement
 
 from dtest import Tester, create_ks
 from tools.assertions import assert_invalid
-from tools.decorators import since
+from plugins.assert_tools import assert_regexp_matches
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 def listify(item):
@@ -23,13 +30,13 @@ def listify(item):
 
 class TestUserTypes(Tester):
     def assertUnauthorized(self, session, query, message):
-        with self.assertRaises(Unauthorized) as cm:
+        with pytest.raises(Unauthorized) as cm:
             session.execute(query)
-        self.assertRegexpMatches(cm.exception.message, message)
+        assert_regexp_matches(repr(cm._excinfo[1]), message)
 
     def assertNoTypes(self, session):
-        for keyspace in session.cluster.metadata.keyspaces.values():
-            self.assertEqual(0, len(keyspace.user_types))
+        for keyspace in list(session.cluster.metadata.keyspaces.values()):
+            assert 0 == len(keyspace.user_types)
 
     def test_type_dropping(self):
         """
@@ -190,7 +197,7 @@ class TestUserTypes(Tester):
               SELECT * FROM simple_table;
            """
         rows = list(session.execute(stmt))
-        self.assertEqual(0, len(rows))
+        assert 0 == len(rows)
 
     def test_nested_user_types(self):
         """Tests user types within user types"""
@@ -285,12 +292,12 @@ class TestUserTypes(Tester):
         rows = list(session.execute(stmt))
 
         primary_item, other_items, other_containers = rows[0]
-        self.assertEqual(listify(primary_item), [u'test', u'test2'])
-        self.assertEqual(listify(other_items), [u'stuff', [u'one', u'two']])
-        self.assertEqual(listify(other_containers), [[u'stuff2', [u'one_other', u'two_other']], [u'stuff3', [u'one_2_other', u'two_2_other']], [u'stuff4', [u'one_3_other', u'two_3_other']]])
+        assert listify(primary_item) == ['test', 'test2']
+        assert listify(other_items) == ['stuff', ['one', 'two']]
+        assert listify(other_containers) == [['stuff2', ['one_other', 'two_other']], ['stuff3', ['one_2_other', 'two_2_other']], ['stuff4', ['one_3_other', 'two_3_other']]]
 
         #  Generate some repetitive data and check its contents:
-        for x in xrange(50):
+        for x in range(50):
 
             # Create row:
             _id = uuid.uuid4()
@@ -323,7 +330,7 @@ class TestUserTypes(Tester):
             rows = list(session.execute(stmt))
 
             items = rows[0][0]
-            self.assertEqual(listify(items), [[u'stuff3', [u'one_2_other', u'two_2_other']], [u'stuff4', [u'one_3_other', u'two_3_other']]])
+            assert listify(items) == [['stuff3', ['one_2_other', 'two_2_other']], ['stuff4', ['one_3_other', 'two_3_other']]]
 
     def test_type_as_part_of_pkey(self):
         """Tests user types as part of a composite pkey"""
@@ -382,9 +389,10 @@ class TestUserTypes(Tester):
         rows = session.execute(stmt)
 
         row_uuid, first_name, like = rows[0]
-        self.assertEqual(first_name, u'Nero')
-        self.assertEqual(like, u'arson')
+        assert first_name == 'Nero'
+        assert like == 'arson'
 
+    @flaky
     def test_type_secondary_indexing(self):
         """
         Confirm that user types are secondary-indexable
@@ -426,7 +434,7 @@ class TestUserTypes(Tester):
               SELECT * from person_likes where name = {first:'Nero', middle: 'Claudius Caesar Augustus', last: 'Germanicus'};
             """
         rows = list(session.execute(stmt))
-        self.assertEqual(0, len(rows))
+        assert 0 == len(rows)
 
         # add a row which doesn't specify data for the indexed column, and query again
         _id = uuid.uuid4()
@@ -441,7 +449,7 @@ class TestUserTypes(Tester):
             """
 
         rows = list(session.execute(stmt))
-        self.assertEqual(0, len(rows))
+        assert 0 == len(rows)
 
         # finally let's add a queryable row, and get it back using the index
         _id = uuid.uuid4()
@@ -460,9 +468,9 @@ class TestUserTypes(Tester):
 
         row_uuid, first_name, like = rows[0]
 
-        self.assertEqual(str(row_uuid), str(_id))
-        self.assertEqual(first_name, u'Nero')
-        self.assertEqual(like, u'arson')
+        assert str(row_uuid) == str(_id)
+        assert first_name == 'Nero'
+        assert like == 'arson'
 
         # rename a field in the type and make sure the index still works
         stmt = """
@@ -478,9 +486,9 @@ class TestUserTypes(Tester):
 
         row_uuid, first_name, like = rows[0]
 
-        self.assertEqual(str(row_uuid), str(_id))
-        self.assertEqual(first_name, u'Nero')
-        self.assertEqual(like, u'arson')
+        assert str(row_uuid) == str(_id)
+        assert first_name == 'Nero'
+        assert like == 'arson'
 
         # add another row to be sure the index is still adding new data
         _id = uuid.uuid4()
@@ -499,15 +507,15 @@ class TestUserTypes(Tester):
 
         row_uuid, first_name, like = rows[0]
 
-        self.assertEqual(str(row_uuid), str(_id))
-        self.assertEqual(first_name, u'Abraham')
-        self.assertEqual(like, u'preserving unions')
+        assert str(row_uuid) == str(_id)
+        assert first_name == 'Abraham'
+        assert like == 'preserving unions'
 
     def test_type_keyspace_permission_isolation(self):
         """
         Confirm permissions are respected for types in different keyspaces
         """
-        self.ignore_log_patterns = [
+        self.fixture_dtest_setup.ignore_log_patterns = [
             # I think this happens when permissions change and a node becomes temporarily unavailable
             # and it's probably ok to ignore on this test, as I can see the schema changes propagating
             # almost immediately after
@@ -609,10 +617,10 @@ class TestUserTypes(Tester):
         session.execute("INSERT INTO bucket (id, my_item) VALUES (1, {sub_one: 'test'})")
 
         rows = list(session.execute("SELECT my_item FROM bucket WHERE id=0"))
-        self.assertEqual(listify(rows[0]), [[u'test', None]])
+        assert listify(rows[0]) == [['test', None]]
 
         rows = list(session.execute("SELECT my_item FROM bucket WHERE id=1"))
-        self.assertEqual(listify(rows[0]), [[u'test', None]])
+        assert listify(rows[0]) == [['test', None]]
 
     def test_no_counters_in_user_types(self):
         # CASSANDRA-7672
@@ -665,7 +673,7 @@ class TestUserTypes(Tester):
 
         # create a bit of data and expect a natural order based on clustering user types
 
-        ids = range(1, 10)
+        ids = list(range(1, 10))
 
         for _id in ids:
             session.execute("INSERT INTO letters (id, letterpair) VALUES ({}, {{first:'a', second:'z'}})".format(_id))
@@ -678,10 +686,10 @@ class TestUserTypes(Tester):
         for _id in ids:
             res = list(session.execute("SELECT letterpair FROM letters where id = {}".format(_id)))
 
-            self.assertEqual(listify(res), [[[u'a', u'z']], [[u'c', u'a']], [[u'c', u'f']], [[u'c', u'z']], [[u'd', u'e']], [[u'z', u'a']]])
+            assert listify(res) == [[['a', 'z']], [['c', 'a']], [['c', 'f']], [['c', 'z']], [['d', 'e']], [['z', 'a']]]
 
     @since('3.6')
-    def udt_subfield_test(self):
+    def test_udt_subfield(self):
         """
         @jira_ticket CASSANDRA-7423
         @since 3.6
@@ -701,7 +709,7 @@ class TestUserTypes(Tester):
         session.execute("INSERT INTO t (id, v) VALUES (0, {third: 2, second: 1})")
         session.execute("UPDATE t set v.first = 'a' WHERE id=0")
         rows = list(session.execute("SELECT * FROM t WHERE id = 0"))
-        self.assertEqual(listify(rows), [[0, ['a', 1, 2]]])
+        assert listify(rows) == [[0, ['a', 1, 2]]]
 
         # Create a full udt
         # Update a subfield on the udt
@@ -709,13 +717,13 @@ class TestUserTypes(Tester):
         session.execute("INSERT INTO t (id, v) VALUES (0, {first: 'c', second: 3, third: 33})")
         session.execute("UPDATE t set v.second = 5 where id=0")
         rows = list(session.execute("SELECT * FROM t WHERE id=0"))
-        self.assertEqual(listify(rows), [[0, ['c', 5, 33]]])
+        assert listify(rows) == [[0, ['c', 5, 33]]]
 
         # Rewrite the entire udt
         # Read back
         session.execute("INSERT INTO t (id, v) VALUES (0, {first: 'alpha', second: 111, third: 100})")
         rows = list(session.execute("SELECT * FROM t WHERE id=0"))
-        self.assertEqual(listify(rows), [[0, ['alpha', 111, 100]]])
+        assert listify(rows) == [[0, ['alpha', 111, 100]]]
 
         # Send three subfield updates to udt
         # Read back
@@ -723,7 +731,7 @@ class TestUserTypes(Tester):
         session.execute("UPDATE t set v.first = 'delta' WHERE id=0")
         session.execute("UPDATE t set v.second = -10 WHERE id=0")
         rows = list(session.execute("SELECT * FROM t WHERE id=0"))
-        self.assertEqual(listify(rows), [[0, ['delta', -10, 100]]])
+        assert listify(rows) == [[0, ['delta', -10, 100]]]
 
         # Send conflicting updates serially to different nodes
         # Read back
@@ -736,7 +744,7 @@ class TestUserTypes(Tester):
         session3.execute("UPDATE user_types.t set v.third = 103 WHERE id=0")
         query = SimpleStatement("SELECT * FROM t WHERE id = 0", consistency_level=ConsistencyLevel.ALL)
         rows = list(session.execute(query))
-        self.assertEqual(listify(rows), [[0, ['delta', -10, 103]]])
+        assert listify(rows) == [[0, ['delta', -10, 103]]]
         session1.shutdown()
         session2.shutdown()
         session3.shutdown()
@@ -745,14 +753,14 @@ class TestUserTypes(Tester):
         session.execute("INSERT INTO t (id, v) VALUES (0, {first:'cass', second:3, third:0})")
         session.execute("UPDATE t SET v.first = null WHERE id = 0")
         rows = list(session.execute("SELECT * FROM t WHERE id=0"))
-        self.assertEqual(listify(rows), [[0, [None, 3, 0]]])
+        assert listify(rows) == [[0, [None, 3, 0]]]
 
         rows = list(session.execute("SELECT v.first FROM t WHERE id=0"))
-        self.assertEqual(listify(rows), [[None]])
+        assert listify(rows) == [[None]]
         rows = list(session.execute("SELECT v.second FROM t WHERE id=0"))
-        self.assertEqual(listify(rows), [[3]])
+        assert listify(rows) == [[3]]
         rows = list(session.execute("SELECT v.third FROM t WHERE id=0"))
-        self.assertEqual(listify(rows), [[0]])
+        assert listify(rows) == [[0]]
 
     @since('2.2')
     def test_user_type_isolation(self):
@@ -761,7 +769,6 @@ class TestUserTypes(Tester):
         @jira_ticket CASSANDRA-9409
         @since 2.2
         """
-
         cluster = self.cluster
         cluster.populate(1).start()
         node1 = cluster.nodelist()[0]
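
The user_types_test.py hunks above repeat one translation throughout: unittest's
assertion helpers become pytest idioms (self.assertRaises -> pytest.raises,
self.assertEqual and self.assertRegexpMatches -> bare asserts). A minimal
runnable sketch of the pattern, with a stand-in Unauthorized exception and
execute() function in place of a live Cassandra session:

    import re
    import pytest

    class Unauthorized(Exception):
        """Stand-in for cassandra.Unauthorized."""

    def execute(query):
        raise Unauthorized("User has no SELECT permission on simple_table")

    def test_unauthorized_query():
        # was: with self.assertRaises(Unauthorized) as cm:
        with pytest.raises(Unauthorized) as cm:
            execute("SELECT * FROM simple_table")
        # Anything placed after the raising call inside the with block
        # never runs, so the regex check belongs after the block.
        # was: self.assertRegexpMatches(cm.exception.message, message)
        assert re.search("no SELECT permission", repr(cm.value))

cm.value is the public accessor for the caught exception; the commit reaches
for the private cm._excinfo[1], which is equivalent but tied to pytest
internals.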

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/wide_rows_test.py
----------------------------------------------------------------------
diff --git a/wide_rows_test.py b/wide_rows_test.py
index 63e6fb1..078f0c5 100644
--- a/wide_rows_test.py
+++ b/wide_rows_test.py
@@ -1,7 +1,8 @@
 import datetime
 import random
+import logging
 
-from dtest import Tester, debug, create_ks
+from dtest import Tester, create_ks
 from tools.assertions import assert_length_equal
 
 status_messages = (
@@ -22,6 +23,8 @@ clients = (
     "Emacs"
 )
 
+logger = logging.getLogger(__name__)
+
 
 class TestWideRows(Tester):
     def test_wide_rows(self):
@@ -36,29 +39,29 @@ class TestWideRows(Tester):
         start_time = datetime.datetime.now()
         create_ks(session, 'wide_rows', 1)
         # Simple timeline:  user -> {date: value, ...}
-        debug('Create Table....')
+        logger.debug('Create Table....')
         session.execute('CREATE TABLE user_events (userid text, event timestamp, value text, PRIMARY KEY (userid, event));')
         date = datetime.datetime.now()
         # Create a large timeline for each of a group of users:
         for user in ('ryan', 'cathy', 'mallen', 'joaquin', 'erin', 'ham'):
-            debug("Writing values for: %s" % user)
-            for day in xrange(5000):
+            logger.debug("Writing values for: %s" % user)
+            for day in range(5000):
                 date_str = (date + datetime.timedelta(day)).strftime("%Y-%m-%d")
                 client = random.choice(clients)
                 msg = random.choice(status_messages)
                 query = "UPDATE user_events SET value = '{msg:%s, client:%s}' WHERE userid='%s' and event='%s';" % (msg, client, user, date_str)
-                # debug(query)
+                # logger.debug(query)
                 session.execute(query)
 
-        # debug('Duration of test: %s' % (datetime.datetime.now() - start_time))
+        # logger.debug('Duration of test: %s' % (datetime.datetime.now() - start_time))
 
         # Pick out an update for a specific date:
         query = "SELECT value FROM user_events WHERE userid='ryan' and event='%s'" % \
                 (date + datetime.timedelta(10)).strftime("%Y-%m-%d")
         rows = session.execute(query)
         for value in rows:
-            debug(value)
-            self.assertGreater(len(value[0]), 0)
+            logger.debug(value)
+            assert len(value[0]) > 0
 
     def test_column_index_stress(self):
         """Write a large number of columns to a single row and set

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/write_failures_test.py
----------------------------------------------------------------------
diff --git a/write_failures_test.py b/write_failures_test.py
index 4a507f4..17ce50e 100644
--- a/write_failures_test.py
+++ b/write_failures_test.py
@@ -1,11 +1,15 @@
 import uuid
+import pytest
+import logging
 
 from cassandra import ConsistencyLevel, WriteFailure, WriteTimeout
 
-from dtest import Tester, supports_v5_protocol
-from thrift_bindings.v22 import ttypes as thrift_types
-from thrift_tests import get_thrift_client
-from tools.decorators import since
+from dtest import Tester
+from thrift_bindings.thrift010 import ttypes as thrift_types
+from thrift_test import get_thrift_client
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 KEYSPACE = "foo"
 
@@ -21,26 +25,23 @@ class TestWriteFailures(Tester):
     They require CURRENT_VERSION = VERSION_4 in CassandraDaemon.Server;
     otherwise these tests will fail.
     """
-
-    def setUp(self):
-        super(TestWriteFailures, self).setUp()
-
-        self.ignore_log_patterns = [
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        fixture_dtest_setup.ignore_log_patterns = (
             "Testing write failures",  # The error to simulate a write failure
             "ERROR WRITE_FAILURE",     # Logged in DEBUG mode for write failures
             "MigrationStage"           # This occurs sometimes due to node down (because of restart)
-        ]
+        )
 
-        self.supports_v5_protocol = supports_v5_protocol(self.cluster.version())
+    @pytest.fixture(scope="function", autouse=True)
+    def fixture_set_test_defaults(self, fixture_dtest_setup):
+        self.supports_v5_protocol = fixture_dtest_setup.supports_v5_protocol(fixture_dtest_setup.cluster.version())
         self.expected_expt = WriteFailure
         self.protocol_version = 5 if self.supports_v5_protocol else 4
         self.replication_factor = 3
         self.consistency_level = ConsistencyLevel.ALL
         self.failing_nodes = [1, 2]
 
-    def tearDown(self):
-        super(TestWriteFailures, self).tearDown()
-
     def _prepare_cluster(self, start_rpc=False):
         self.cluster.populate(3)
 
@@ -48,7 +49,7 @@ class TestWriteFailures(Tester):
             self.cluster.set_configuration_options(values={'start_rpc': True})
 
         self.cluster.start(wait_for_binary_proto=True)
-        self.nodes = self.cluster.nodes.values()
+        self.nodes = list(self.cluster.nodes.values())
 
         session = self.patient_exclusive_cql_connection(self.nodes[0], protocol_version=self.protocol_version)
 
@@ -81,9 +82,9 @@ class TestWriteFailures(Tester):
         if self.expected_expt is None:
             session.execute(statement)
         else:
-            with self.assertRaises(self.expected_expt) as cm:
+            with pytest.raises(self.expected_expt) as cm:
                 session.execute(statement)
-            return cm.exception
+            return cm._excinfo[1]
 
     def _assert_error_code_map_exists_with_code(self, exception, expected_code):
         """
@@ -91,14 +92,14 @@ class TestWriteFailures(Tester):
         where at least one node responded with some expected code.
         This is meant for testing failure exceptions on protocol v5.
         """
-        self.assertIsNotNone(exception)
-        self.assertIsNotNone(exception.error_code_map)
+        assert exception is not None
+        assert exception.error_code_map is not None
         expected_code_found = False
-        for error_code in exception.error_code_map.values():
+        for error_code in list(exception.error_code_map.values()):
             if error_code == expected_code:
                 expected_code_found = True
                 break
-        self.assertTrue(expected_code_found, "The error code map did not contain " + str(expected_code))
+        assert expected_code_found, "The error code map did not contain " + str(expected_code)
 
     @since('2.2', max_version='2.2.x')
     def test_mutation_v2(self):
@@ -147,8 +148,8 @@ class TestWriteFailures(Tester):
 
     def test_mutation_one(self):
         """
-            A WriteFailure is received at consistency level ONE
-            if all nodes fail
+        A WriteFailure is received at consistency level ONE
+        if all nodes fail
         """
         self.consistency_level = ConsistencyLevel.ONE
         self.failing_nodes = [0, 1, 2]
@@ -220,7 +221,7 @@ class TestWriteFailures(Tester):
         client.transport.open()
         client.set_keyspace(KEYSPACE)
 
-        with self.assertRaises(self.expected_expt):
+        with pytest.raises(self.expected_expt):
             client.insert('key1',
                           thrift_types.ColumnParent('mytable'),
                           thrift_types.Column('value', 'Value 1', 0),


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@cassandra.apache.org
For additional commands, e-mail: commits-help@cassandra.apache.org


[34/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/cassandra-thrift/v11/Cassandra.py
----------------------------------------------------------------------
diff --git a/cassandra-thrift/v11/Cassandra.py b/cassandra-thrift/v11/Cassandra.py
index 015f1a5..ae956cf 100644
--- a/cassandra-thrift/v11/Cassandra.py
+++ b/cassandra-thrift/v11/Cassandra.py
@@ -7,7 +7,7 @@
 #
 
 from thrift.Thrift import TType, TMessageType, TException
-from ttypes import *
+from .ttypes import *
 from thrift.Thrift import TProcessor
 from thrift.transport import TTransport
 from thrift.protocol import TBinaryProtocol, TProtocol
@@ -1791,9 +1791,9 @@ class Processor(Iface, TProcessor):
     result = login_result()
     try:
       self._handler.login(args.auth_request)
-    except AuthenticationException, authnx:
+    except AuthenticationException as authnx:
       result.authnx = authnx
-    except AuthorizationException, authzx:
+    except AuthorizationException as authzx:
       result.authzx = authzx
     oprot.writeMessageBegin("login", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -1807,7 +1807,7 @@ class Processor(Iface, TProcessor):
     result = set_keyspace_result()
     try:
       self._handler.set_keyspace(args.keyspace)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
     oprot.writeMessageBegin("set_keyspace", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -1821,13 +1821,13 @@ class Processor(Iface, TProcessor):
     result = get_result()
     try:
       result.success = self._handler.get(args.key, args.column_path, args.consistency_level)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
-    except NotFoundException, nfe:
+    except NotFoundException as nfe:
       result.nfe = nfe
-    except UnavailableException, ue:
+    except UnavailableException as ue:
       result.ue = ue
-    except TimedOutException, te:
+    except TimedOutException as te:
       result.te = te
     oprot.writeMessageBegin("get", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -1841,11 +1841,11 @@ class Processor(Iface, TProcessor):
     result = get_slice_result()
     try:
       result.success = self._handler.get_slice(args.key, args.column_parent, args.predicate, args.consistency_level)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
-    except UnavailableException, ue:
+    except UnavailableException as ue:
       result.ue = ue
-    except TimedOutException, te:
+    except TimedOutException as te:
       result.te = te
     oprot.writeMessageBegin("get_slice", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -1859,11 +1859,11 @@ class Processor(Iface, TProcessor):
     result = get_count_result()
     try:
       result.success = self._handler.get_count(args.key, args.column_parent, args.predicate, args.consistency_level)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
-    except UnavailableException, ue:
+    except UnavailableException as ue:
       result.ue = ue
-    except TimedOutException, te:
+    except TimedOutException as te:
       result.te = te
     oprot.writeMessageBegin("get_count", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -1877,11 +1877,11 @@ class Processor(Iface, TProcessor):
     result = multiget_slice_result()
     try:
       result.success = self._handler.multiget_slice(args.keys, args.column_parent, args.predicate, args.consistency_level)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
-    except UnavailableException, ue:
+    except UnavailableException as ue:
       result.ue = ue
-    except TimedOutException, te:
+    except TimedOutException as te:
       result.te = te
     oprot.writeMessageBegin("multiget_slice", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -1895,11 +1895,11 @@ class Processor(Iface, TProcessor):
     result = multiget_count_result()
     try:
       result.success = self._handler.multiget_count(args.keys, args.column_parent, args.predicate, args.consistency_level)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
-    except UnavailableException, ue:
+    except UnavailableException as ue:
       result.ue = ue
-    except TimedOutException, te:
+    except TimedOutException as te:
       result.te = te
     oprot.writeMessageBegin("multiget_count", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -1913,11 +1913,11 @@ class Processor(Iface, TProcessor):
     result = get_range_slices_result()
     try:
       result.success = self._handler.get_range_slices(args.column_parent, args.predicate, args.range, args.consistency_level)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
-    except UnavailableException, ue:
+    except UnavailableException as ue:
       result.ue = ue
-    except TimedOutException, te:
+    except TimedOutException as te:
       result.te = te
     oprot.writeMessageBegin("get_range_slices", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -1931,11 +1931,11 @@ class Processor(Iface, TProcessor):
     result = get_paged_slice_result()
     try:
       result.success = self._handler.get_paged_slice(args.column_family, args.range, args.start_column, args.consistency_level)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
-    except UnavailableException, ue:
+    except UnavailableException as ue:
       result.ue = ue
-    except TimedOutException, te:
+    except TimedOutException as te:
       result.te = te
     oprot.writeMessageBegin("get_paged_slice", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -1949,11 +1949,11 @@ class Processor(Iface, TProcessor):
     result = get_indexed_slices_result()
     try:
       result.success = self._handler.get_indexed_slices(args.column_parent, args.index_clause, args.column_predicate, args.consistency_level)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
-    except UnavailableException, ue:
+    except UnavailableException as ue:
       result.ue = ue
-    except TimedOutException, te:
+    except TimedOutException as te:
       result.te = te
     oprot.writeMessageBegin("get_indexed_slices", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -1967,11 +1967,11 @@ class Processor(Iface, TProcessor):
     result = insert_result()
     try:
       self._handler.insert(args.key, args.column_parent, args.column, args.consistency_level)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
-    except UnavailableException, ue:
+    except UnavailableException as ue:
       result.ue = ue
-    except TimedOutException, te:
+    except TimedOutException as te:
       result.te = te
     oprot.writeMessageBegin("insert", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -1985,11 +1985,11 @@ class Processor(Iface, TProcessor):
     result = add_result()
     try:
       self._handler.add(args.key, args.column_parent, args.column, args.consistency_level)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
-    except UnavailableException, ue:
+    except UnavailableException as ue:
       result.ue = ue
-    except TimedOutException, te:
+    except TimedOutException as te:
       result.te = te
     oprot.writeMessageBegin("add", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -2003,11 +2003,11 @@ class Processor(Iface, TProcessor):
     result = remove_result()
     try:
       self._handler.remove(args.key, args.column_path, args.timestamp, args.consistency_level)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
-    except UnavailableException, ue:
+    except UnavailableException as ue:
       result.ue = ue
-    except TimedOutException, te:
+    except TimedOutException as te:
       result.te = te
     oprot.writeMessageBegin("remove", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -2021,11 +2021,11 @@ class Processor(Iface, TProcessor):
     result = remove_counter_result()
     try:
       self._handler.remove_counter(args.key, args.path, args.consistency_level)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
-    except UnavailableException, ue:
+    except UnavailableException as ue:
       result.ue = ue
-    except TimedOutException, te:
+    except TimedOutException as te:
       result.te = te
     oprot.writeMessageBegin("remove_counter", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -2039,11 +2039,11 @@ class Processor(Iface, TProcessor):
     result = batch_mutate_result()
     try:
       self._handler.batch_mutate(args.mutation_map, args.consistency_level)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
-    except UnavailableException, ue:
+    except UnavailableException as ue:
       result.ue = ue
-    except TimedOutException, te:
+    except TimedOutException as te:
       result.te = te
     oprot.writeMessageBegin("batch_mutate", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -2057,11 +2057,11 @@ class Processor(Iface, TProcessor):
     result = truncate_result()
     try:
       self._handler.truncate(args.cfname)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
-    except UnavailableException, ue:
+    except UnavailableException as ue:
       result.ue = ue
-    except TimedOutException, te:
+    except TimedOutException as te:
       result.te = te
     oprot.writeMessageBegin("truncate", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -2075,7 +2075,7 @@ class Processor(Iface, TProcessor):
     result = describe_schema_versions_result()
     try:
       result.success = self._handler.describe_schema_versions()
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
     oprot.writeMessageBegin("describe_schema_versions", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -2089,7 +2089,7 @@ class Processor(Iface, TProcessor):
     result = describe_keyspaces_result()
     try:
       result.success = self._handler.describe_keyspaces()
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
     oprot.writeMessageBegin("describe_keyspaces", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -2125,7 +2125,7 @@ class Processor(Iface, TProcessor):
     result = describe_ring_result()
     try:
       result.success = self._handler.describe_ring(args.keyspace)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
     oprot.writeMessageBegin("describe_ring", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -2161,9 +2161,9 @@ class Processor(Iface, TProcessor):
     result = describe_keyspace_result()
     try:
       result.success = self._handler.describe_keyspace(args.keyspace)
-    except NotFoundException, nfe:
+    except NotFoundException as nfe:
       result.nfe = nfe
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
     oprot.writeMessageBegin("describe_keyspace", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -2177,7 +2177,7 @@ class Processor(Iface, TProcessor):
     result = describe_splits_result()
     try:
       result.success = self._handler.describe_splits(args.cfName, args.start_token, args.end_token, args.keys_per_split)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
     oprot.writeMessageBegin("describe_splits", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -2191,9 +2191,9 @@ class Processor(Iface, TProcessor):
     result = system_add_column_family_result()
     try:
       result.success = self._handler.system_add_column_family(args.cf_def)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
-    except SchemaDisagreementException, sde:
+    except SchemaDisagreementException as sde:
       result.sde = sde
     oprot.writeMessageBegin("system_add_column_family", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -2207,9 +2207,9 @@ class Processor(Iface, TProcessor):
     result = system_drop_column_family_result()
     try:
       result.success = self._handler.system_drop_column_family(args.column_family)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
-    except SchemaDisagreementException, sde:
+    except SchemaDisagreementException as sde:
       result.sde = sde
     oprot.writeMessageBegin("system_drop_column_family", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -2223,9 +2223,9 @@ class Processor(Iface, TProcessor):
     result = system_add_keyspace_result()
     try:
       result.success = self._handler.system_add_keyspace(args.ks_def)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
-    except SchemaDisagreementException, sde:
+    except SchemaDisagreementException as sde:
       result.sde = sde
     oprot.writeMessageBegin("system_add_keyspace", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -2239,9 +2239,9 @@ class Processor(Iface, TProcessor):
     result = system_drop_keyspace_result()
     try:
       result.success = self._handler.system_drop_keyspace(args.keyspace)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
-    except SchemaDisagreementException, sde:
+    except SchemaDisagreementException as sde:
       result.sde = sde
     oprot.writeMessageBegin("system_drop_keyspace", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -2255,9 +2255,9 @@ class Processor(Iface, TProcessor):
     result = system_update_keyspace_result()
     try:
       result.success = self._handler.system_update_keyspace(args.ks_def)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
-    except SchemaDisagreementException, sde:
+    except SchemaDisagreementException as sde:
       result.sde = sde
     oprot.writeMessageBegin("system_update_keyspace", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -2271,9 +2271,9 @@ class Processor(Iface, TProcessor):
     result = system_update_column_family_result()
     try:
       result.success = self._handler.system_update_column_family(args.cf_def)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
-    except SchemaDisagreementException, sde:
+    except SchemaDisagreementException as sde:
       result.sde = sde
     oprot.writeMessageBegin("system_update_column_family", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -2287,13 +2287,13 @@ class Processor(Iface, TProcessor):
     result = execute_cql_query_result()
     try:
       result.success = self._handler.execute_cql_query(args.query, args.compression)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
-    except UnavailableException, ue:
+    except UnavailableException as ue:
       result.ue = ue
-    except TimedOutException, te:
+    except TimedOutException as te:
       result.te = te
-    except SchemaDisagreementException, sde:
+    except SchemaDisagreementException as sde:
       result.sde = sde
     oprot.writeMessageBegin("execute_cql_query", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -2307,7 +2307,7 @@ class Processor(Iface, TProcessor):
     result = prepare_cql_query_result()
     try:
       result.success = self._handler.prepare_cql_query(args.query, args.compression)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
     oprot.writeMessageBegin("prepare_cql_query", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -2321,13 +2321,13 @@ class Processor(Iface, TProcessor):
     result = execute_prepared_cql_query_result()
     try:
       result.success = self._handler.execute_prepared_cql_query(args.itemId, args.values)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
-    except UnavailableException, ue:
+    except UnavailableException as ue:
       result.ue = ue
-    except TimedOutException, te:
+    except TimedOutException as te:
       result.te = te
-    except SchemaDisagreementException, sde:
+    except SchemaDisagreementException as sde:
       result.sde = sde
     oprot.writeMessageBegin("execute_prepared_cql_query", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -2341,7 +2341,7 @@ class Processor(Iface, TProcessor):
     result = set_cql_version_result()
     try:
       self._handler.set_cql_version(args.version)
-    except InvalidRequestException, ire:
+    except InvalidRequestException as ire:
       result.ire = ire
     oprot.writeMessageBegin("set_cql_version", TMessageType.REPLY, seqid)
     result.write(oprot)
@@ -2405,7 +2405,7 @@ class login_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -2479,7 +2479,7 @@ class login_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -2541,7 +2541,7 @@ class set_keyspace_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -2602,7 +2602,7 @@ class set_keyspace_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -2693,7 +2693,7 @@ class get_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -2805,7 +2805,7 @@ class get_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -2911,7 +2911,7 @@ class get_slice_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -2955,7 +2955,7 @@ class get_slice_result:
         if ftype == TType.LIST:
           self.success = []
           (_etype171, _size168) = iprot.readListBegin()
-          for _i172 in xrange(_size168):
+          for _i172 in range(_size168):
             _elem173 = ColumnOrSuperColumn()
             _elem173.read(iprot)
             self.success.append(_elem173)
@@ -3018,7 +3018,7 @@ class get_slice_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -3124,7 +3124,7 @@ class get_count_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -3222,7 +3222,7 @@ class get_count_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -3267,7 +3267,7 @@ class multiget_slice_args:
         if ftype == TType.LIST:
           self.keys = []
           (_etype178, _size175) = iprot.readListBegin()
-          for _i179 in xrange(_size175):
+          for _i179 in range(_size175):
             _elem180 = iprot.readString();
             self.keys.append(_elem180)
           iprot.readListEnd()
@@ -3336,7 +3336,7 @@ class multiget_slice_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -3380,11 +3380,11 @@ class multiget_slice_result:
         if ftype == TType.MAP:
           self.success = {}
           (_ktype183, _vtype184, _size182 ) = iprot.readMapBegin() 
-          for _i186 in xrange(_size182):
+          for _i186 in range(_size182):
             _key187 = iprot.readString();
             _val188 = []
             (_etype192, _size189) = iprot.readListBegin()
-            for _i193 in xrange(_size189):
+            for _i193 in range(_size189):
               _elem194 = ColumnOrSuperColumn()
               _elem194.read(iprot)
               _val188.append(_elem194)
@@ -3424,7 +3424,7 @@ class multiget_slice_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.MAP, 0)
       oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.success))
-      for kiter195,viter196 in self.success.items():
+      for kiter195,viter196 in list(self.success.items()):
         oprot.writeString(kiter195)
         oprot.writeListBegin(TType.STRUCT, len(viter196))
         for iter197 in viter196:
@@ -3453,7 +3453,7 @@ class multiget_slice_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -3498,7 +3498,7 @@ class multiget_count_args:
         if ftype == TType.LIST:
           self.keys = []
           (_etype201, _size198) = iprot.readListBegin()
-          for _i202 in xrange(_size198):
+          for _i202 in range(_size198):
             _elem203 = iprot.readString();
             self.keys.append(_elem203)
           iprot.readListEnd()
@@ -3567,7 +3567,7 @@ class multiget_count_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -3611,7 +3611,7 @@ class multiget_count_result:
         if ftype == TType.MAP:
           self.success = {}
           (_ktype206, _vtype207, _size205 ) = iprot.readMapBegin() 
-          for _i209 in xrange(_size205):
+          for _i209 in range(_size205):
             _key210 = iprot.readString();
             _val211 = iprot.readI32();
             self.success[_key210] = _val211
@@ -3649,7 +3649,7 @@ class multiget_count_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.MAP, 0)
       oprot.writeMapBegin(TType.STRING, TType.I32, len(self.success))
-      for kiter212,viter213 in self.success.items():
+      for kiter212,viter213 in list(self.success.items()):
         oprot.writeString(kiter212)
         oprot.writeI32(viter213)
       oprot.writeMapEnd()
@@ -3675,7 +3675,7 @@ class multiget_count_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -3782,7 +3782,7 @@ class get_range_slices_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -3826,7 +3826,7 @@ class get_range_slices_result:
         if ftype == TType.LIST:
           self.success = []
           (_etype217, _size214) = iprot.readListBegin()
-          for _i218 in xrange(_size214):
+          for _i218 in range(_size214):
             _elem219 = KeySlice()
             _elem219.read(iprot)
             self.success.append(_elem219)
@@ -3889,7 +3889,7 @@ class get_range_slices_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -3994,7 +3994,7 @@ class get_paged_slice_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -4038,7 +4038,7 @@ class get_paged_slice_result:
         if ftype == TType.LIST:
           self.success = []
           (_etype224, _size221) = iprot.readListBegin()
-          for _i225 in xrange(_size221):
+          for _i225 in range(_size221):
             _elem226 = KeySlice()
             _elem226.read(iprot)
             self.success.append(_elem226)
@@ -4101,7 +4101,7 @@ class get_paged_slice_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -4208,7 +4208,7 @@ class get_indexed_slices_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -4252,7 +4252,7 @@ class get_indexed_slices_result:
         if ftype == TType.LIST:
           self.success = []
           (_etype231, _size228) = iprot.readListBegin()
-          for _i232 in xrange(_size228):
+          for _i232 in range(_size228):
             _elem233 = KeySlice()
             _elem233.read(iprot)
             self.success.append(_elem233)
@@ -4315,7 +4315,7 @@ class get_indexed_slices_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -4421,7 +4421,7 @@ class insert_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -4508,7 +4508,7 @@ class insert_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -4614,7 +4614,7 @@ class add_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -4701,7 +4701,7 @@ class add_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -4804,7 +4804,7 @@ class remove_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -4891,7 +4891,7 @@ class remove_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -4982,7 +4982,7 @@ class remove_counter_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -5069,7 +5069,7 @@ class remove_counter_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -5108,15 +5108,15 @@ class batch_mutate_args:
         if ftype == TType.MAP:
           self.mutation_map = {}
           (_ktype236, _vtype237, _size235 ) = iprot.readMapBegin() 
-          for _i239 in xrange(_size235):
+          for _i239 in range(_size235):
             _key240 = iprot.readString();
             _val241 = {}
             (_ktype243, _vtype244, _size242 ) = iprot.readMapBegin() 
-            for _i246 in xrange(_size242):
+            for _i246 in range(_size242):
               _key247 = iprot.readString();
               _val248 = []
               (_etype252, _size249) = iprot.readListBegin()
-              for _i253 in xrange(_size249):
+              for _i253 in range(_size249):
                 _elem254 = Mutation()
                 _elem254.read(iprot)
                 _val248.append(_elem254)
@@ -5145,10 +5145,10 @@ class batch_mutate_args:
     if self.mutation_map is not None:
       oprot.writeFieldBegin('mutation_map', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.mutation_map))
-      for kiter255,viter256 in self.mutation_map.items():
+      for kiter255,viter256 in list(self.mutation_map.items()):
         oprot.writeString(kiter255)
         oprot.writeMapBegin(TType.STRING, TType.LIST, len(viter256))
-        for kiter257,viter258 in viter256.items():
+        for kiter257,viter258 in list(viter256.items()):
           oprot.writeString(kiter257)
           oprot.writeListBegin(TType.STRUCT, len(viter258))
           for iter259 in viter258:
@@ -5174,7 +5174,7 @@ class batch_mutate_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -5261,7 +5261,7 @@ class batch_mutate_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -5323,7 +5323,7 @@ class truncate_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -5410,7 +5410,7 @@ class truncate_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -5452,7 +5452,7 @@ class describe_schema_versions_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -5490,11 +5490,11 @@ class describe_schema_versions_result:
         if ftype == TType.MAP:
           self.success = {}
           (_ktype261, _vtype262, _size260 ) = iprot.readMapBegin() 
-          for _i264 in xrange(_size260):
+          for _i264 in range(_size260):
             _key265 = iprot.readString();
             _val266 = []
             (_etype270, _size267) = iprot.readListBegin()
-            for _i271 in xrange(_size267):
+            for _i271 in range(_size267):
               _elem272 = iprot.readString();
               _val266.append(_elem272)
             iprot.readListEnd()
@@ -5521,7 +5521,7 @@ class describe_schema_versions_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.MAP, 0)
       oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.success))
-      for kiter273,viter274 in self.success.items():
+      for kiter273,viter274 in list(self.success.items()):
         oprot.writeString(kiter273)
         oprot.writeListBegin(TType.STRING, len(viter274))
         for iter275 in viter274:
@@ -5542,7 +5542,7 @@ class describe_schema_versions_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -5584,7 +5584,7 @@ class describe_keyspaces_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -5622,7 +5622,7 @@ class describe_keyspaces_result:
         if ftype == TType.LIST:
           self.success = []
           (_etype279, _size276) = iprot.readListBegin()
-          for _i280 in xrange(_size276):
+          for _i280 in range(_size276):
             _elem281 = KsDef()
             _elem281.read(iprot)
             self.success.append(_elem281)
@@ -5665,7 +5665,7 @@ class describe_keyspaces_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -5707,7 +5707,7 @@ class describe_cluster_name_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -5766,7 +5766,7 @@ class describe_cluster_name_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -5808,7 +5808,7 @@ class describe_version_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -5867,7 +5867,7 @@ class describe_version_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -5929,7 +5929,7 @@ class describe_ring_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -5967,7 +5967,7 @@ class describe_ring_result:
         if ftype == TType.LIST:
           self.success = []
           (_etype286, _size283) = iprot.readListBegin()
-          for _i287 in xrange(_size283):
+          for _i287 in range(_size283):
             _elem288 = TokenRange()
             _elem288.read(iprot)
             self.success.append(_elem288)
@@ -6010,7 +6010,7 @@ class describe_ring_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -6052,7 +6052,7 @@ class describe_partitioner_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -6111,7 +6111,7 @@ class describe_partitioner_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -6153,7 +6153,7 @@ class describe_snitch_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -6212,7 +6212,7 @@ class describe_snitch_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -6274,7 +6274,7 @@ class describe_keyspace_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -6360,7 +6360,7 @@ class describe_keyspace_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -6464,7 +6464,7 @@ class describe_splits_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -6502,7 +6502,7 @@ class describe_splits_result:
         if ftype == TType.LIST:
           self.success = []
           (_etype293, _size290) = iprot.readListBegin()
-          for _i294 in xrange(_size290):
+          for _i294 in range(_size290):
             _elem295 = iprot.readString();
             self.success.append(_elem295)
           iprot.readListEnd()
@@ -6544,7 +6544,7 @@ class describe_splits_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -6607,7 +6607,7 @@ class system_add_column_family_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -6692,7 +6692,7 @@ class system_add_column_family_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -6754,7 +6754,7 @@ class system_drop_column_family_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -6839,7 +6839,7 @@ class system_drop_column_family_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -6902,7 +6902,7 @@ class system_add_keyspace_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -6987,7 +6987,7 @@ class system_add_keyspace_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -7049,7 +7049,7 @@ class system_drop_keyspace_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -7134,7 +7134,7 @@ class system_drop_keyspace_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -7197,7 +7197,7 @@ class system_update_keyspace_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -7282,7 +7282,7 @@ class system_update_keyspace_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -7345,7 +7345,7 @@ class system_update_column_family_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -7430,7 +7430,7 @@ class system_update_column_family_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -7506,7 +7506,7 @@ class execute_cql_query_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -7618,7 +7618,7 @@ class execute_cql_query_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -7694,7 +7694,7 @@ class prepare_cql_query_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -7767,7 +7767,7 @@ class prepare_cql_query_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -7811,7 +7811,7 @@ class execute_prepared_cql_query_args:
         if ftype == TType.LIST:
           self.values = []
           (_etype300, _size297) = iprot.readListBegin()
-          for _i301 in xrange(_size297):
+          for _i301 in range(_size297):
             _elem302 = iprot.readString();
             self.values.append(_elem302)
           iprot.readListEnd()
@@ -7851,7 +7851,7 @@ class execute_prepared_cql_query_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -7963,7 +7963,7 @@ class execute_prepared_cql_query_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -8025,7 +8025,7 @@ class set_cql_version_args:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -8086,7 +8086,7 @@ class set_cql_version_result:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):

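The generated service classes above change in only two mechanical ways: Python 3 removed dict.iteritems() (dict.items() now returns a lazy view), and xrange() was folded into range(). A minimal sketch of both substitutions, outside the Thrift code:

    # dict.items() in Python 3 returns a view, covering the old
    # iteritems() use without building an intermediate list:
    d = {'a': 1, 'b': 2}
    rendered = ', '.join('%s=%r' % (k, v) for k, v in d.items())

    # range() is lazy in Python 3, so it is a drop-in for xrange():
    elems = [i * 2 for i in range(5)]
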
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/cassandra-thrift/v11/constants.py
----------------------------------------------------------------------
diff --git a/cassandra-thrift/v11/constants.py b/cassandra-thrift/v11/constants.py
index 7cf5771..8c5fdfe 100644
--- a/cassandra-thrift/v11/constants.py
+++ b/cassandra-thrift/v11/constants.py
@@ -7,6 +7,6 @@
 #
 
 from thrift.Thrift import TType, TMessageType, TException
-from ttypes import *
+from .ttypes import *
 
 VERSION = "19.30.0"

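This one-line change reflects Python 3's switch to absolute imports by default (PEP 328): a bare "from ttypes import *" no longer resolves against the containing package, so the explicit relative form is required. The same pattern for any package with a sibling module, as a sketch (package and module names here are illustrative):

    # mypkg/
    #   __init__.py
    #   ttypes.py        # defines the shared types
    #   constants.py
    #
    # In mypkg/constants.py under Python 3:
    from .ttypes import *      # Python 2 allowed: from ttypes import *
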
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/cassandra-thrift/v11/ttypes.py
----------------------------------------------------------------------
diff --git a/cassandra-thrift/v11/ttypes.py b/cassandra-thrift/v11/ttypes.py
index 3b4ec8f..799a0d5 100644
--- a/cassandra-thrift/v11/ttypes.py
+++ b/cassandra-thrift/v11/ttypes.py
@@ -257,7 +257,7 @@ class Column:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -306,7 +306,7 @@ class SuperColumn:
         if ftype == TType.LIST:
           self.columns = []
           (_etype3, _size0) = iprot.readListBegin()
-          for _i4 in xrange(_size0):
+          for _i4 in range(_size0):
             _elem5 = Column()
             _elem5.read(iprot)
             self.columns.append(_elem5)
@@ -347,7 +347,7 @@ class SuperColumn:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -423,7 +423,7 @@ class CounterColumn:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -467,7 +467,7 @@ class CounterSuperColumn:
         if ftype == TType.LIST:
           self.columns = []
           (_etype10, _size7) = iprot.readListBegin()
-          for _i11 in xrange(_size7):
+          for _i11 in range(_size7):
             _elem12 = CounterColumn()
             _elem12.read(iprot)
             self.columns.append(_elem12)
@@ -508,7 +508,7 @@ class CounterSuperColumn:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -622,7 +622,7 @@ class ColumnOrSuperColumn:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -670,7 +670,7 @@ class NotFoundException(TException):
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -738,7 +738,7 @@ class InvalidRequestException(TException):
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -786,7 +786,7 @@ class UnavailableException(TException):
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -834,7 +834,7 @@ class TimedOutException(TException):
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -901,7 +901,7 @@ class AuthenticationException(TException):
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -968,7 +968,7 @@ class AuthorizationException(TException):
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -1016,7 +1016,7 @@ class SchemaDisagreementException(TException):
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -1097,7 +1097,7 @@ class ColumnParent:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -1194,7 +1194,7 @@ class ColumnPath:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -1313,7 +1313,7 @@ class SliceRange:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -1363,7 +1363,7 @@ class SlicePredicate:
         if ftype == TType.LIST:
           self.column_names = []
           (_etype17, _size14) = iprot.readListBegin()
-          for _i18 in xrange(_size14):
+          for _i18 in range(_size14):
             _elem19 = iprot.readString();
             self.column_names.append(_elem19)
           iprot.readListEnd()
@@ -1405,7 +1405,7 @@ class SlicePredicate:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -1495,7 +1495,7 @@ class IndexExpression:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -1539,7 +1539,7 @@ class IndexClause:
         if ftype == TType.LIST:
           self.expressions = []
           (_etype24, _size21) = iprot.readListBegin()
-          for _i25 in xrange(_size21):
+          for _i25 in range(_size21):
             _elem26 = IndexExpression()
             _elem26.read(iprot)
             self.expressions.append(_elem26)
@@ -1596,7 +1596,7 @@ class IndexClause:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -1674,7 +1674,7 @@ class KeyRange:
         if ftype == TType.LIST:
           self.row_filter = []
           (_etype31, _size28) = iprot.readListBegin()
-          for _i32 in xrange(_size28):
+          for _i32 in range(_size28):
             _elem33 = IndexExpression()
             _elem33.read(iprot)
             self.row_filter.append(_elem33)
@@ -1734,7 +1734,7 @@ class KeyRange:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -1784,7 +1784,7 @@ class KeySlice:
         if ftype == TType.LIST:
           self.columns = []
           (_etype38, _size35) = iprot.readListBegin()
-          for _i39 in xrange(_size35):
+          for _i39 in range(_size35):
             _elem40 = ColumnOrSuperColumn()
             _elem40.read(iprot)
             self.columns.append(_elem40)
@@ -1825,7 +1825,7 @@ class KeySlice:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -1901,7 +1901,7 @@ class KeyCount:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -1988,7 +1988,7 @@ class Deletion:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -2066,7 +2066,7 @@ class Mutation:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -2150,7 +2150,7 @@ class EndpointDetails:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -2215,7 +2215,7 @@ class TokenRange:
         if ftype == TType.LIST:
           self.endpoints = []
           (_etype45, _size42) = iprot.readListBegin()
-          for _i46 in xrange(_size42):
+          for _i46 in range(_size42):
             _elem47 = iprot.readString();
             self.endpoints.append(_elem47)
           iprot.readListEnd()
@@ -2225,7 +2225,7 @@ class TokenRange:
         if ftype == TType.LIST:
           self.rpc_endpoints = []
           (_etype51, _size48) = iprot.readListBegin()
-          for _i52 in xrange(_size48):
+          for _i52 in range(_size48):
             _elem53 = iprot.readString();
             self.rpc_endpoints.append(_elem53)
           iprot.readListEnd()
@@ -2235,7 +2235,7 @@ class TokenRange:
         if ftype == TType.LIST:
           self.endpoint_details = []
           (_etype57, _size54) = iprot.readListBegin()
-          for _i58 in xrange(_size54):
+          for _i58 in range(_size54):
             _elem59 = EndpointDetails()
             _elem59.read(iprot)
             self.endpoint_details.append(_elem59)
@@ -2296,7 +2296,7 @@ class TokenRange:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -2334,7 +2334,7 @@ class AuthenticationRequest:
         if ftype == TType.MAP:
           self.credentials = {}
           (_ktype64, _vtype65, _size63 ) = iprot.readMapBegin() 
-          for _i67 in xrange(_size63):
+          for _i67 in range(_size63):
             _key68 = iprot.readString();
             _val69 = iprot.readString();
             self.credentials[_key68] = _val69
@@ -2354,7 +2354,7 @@ class AuthenticationRequest:
     if self.credentials is not None:
       oprot.writeFieldBegin('credentials', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.credentials))
-      for kiter70,viter71 in self.credentials.items():
+      for kiter70,viter71 in list(self.credentials.items()):
         oprot.writeString(kiter70)
         oprot.writeString(viter71)
       oprot.writeMapEnd()
@@ -2370,7 +2370,7 @@ class AuthenticationRequest:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -2438,7 +2438,7 @@ class ColumnDef:
         if ftype == TType.MAP:
           self.index_options = {}
           (_ktype73, _vtype74, _size72 ) = iprot.readMapBegin() 
-          for _i76 in xrange(_size72):
+          for _i76 in range(_size72):
             _key77 = iprot.readString();
             _val78 = iprot.readString();
             self.index_options[_key77] = _val78
@@ -2474,7 +2474,7 @@ class ColumnDef:
     if self.index_options is not None:
       oprot.writeFieldBegin('index_options', TType.MAP, 5)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.index_options))
-      for kiter79,viter80 in self.index_options.items():
+      for kiter79,viter80 in list(self.index_options.items()):
         oprot.writeString(kiter79)
         oprot.writeString(viter80)
       oprot.writeMapEnd()
@@ -2492,7 +2492,7 @@ class ColumnDef:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -2665,7 +2665,7 @@ class CfDef:
         if ftype == TType.LIST:
           self.column_metadata = []
           (_etype84, _size81) = iprot.readListBegin()
-          for _i85 in xrange(_size81):
+          for _i85 in range(_size81):
             _elem86 = ColumnDef()
             _elem86.read(iprot)
             self.column_metadata.append(_elem86)
@@ -2721,7 +2721,7 @@ class CfDef:
         if ftype == TType.MAP:
           self.compaction_strategy_options = {}
           (_ktype88, _vtype89, _size87 ) = iprot.readMapBegin() 
-          for _i91 in xrange(_size87):
+          for _i91 in range(_size87):
             _key92 = iprot.readString();
             _val93 = iprot.readString();
             self.compaction_strategy_options[_key92] = _val93
@@ -2732,7 +2732,7 @@ class CfDef:
         if ftype == TType.MAP:
           self.compression_options = {}
           (_ktype95, _vtype96, _size94 ) = iprot.readMapBegin() 
-          for _i98 in xrange(_size94):
+          for _i98 in range(_size94):
             _key99 = iprot.readString();
             _val100 = iprot.readString();
             self.compression_options[_key99] = _val100
@@ -2753,7 +2753,7 @@ class CfDef:
         if ftype == TType.LIST:
           self.column_aliases = []
           (_etype104, _size101) = iprot.readListBegin()
-          for _i105 in xrange(_size101):
+          for _i105 in range(_size101):
             _elem106 = iprot.readString();
             self.column_aliases.append(_elem106)
           iprot.readListEnd()
@@ -2939,7 +2939,7 @@ class CfDef:
     if self.compaction_strategy_options is not None:
       oprot.writeFieldBegin('compaction_strategy_options', TType.MAP, 30)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.compaction_strategy_options))
-      for kiter108,viter109 in self.compaction_strategy_options.items():
+      for kiter108,viter109 in list(self.compaction_strategy_options.items()):
         oprot.writeString(kiter108)
         oprot.writeString(viter109)
       oprot.writeMapEnd()
@@ -2951,7 +2951,7 @@ class CfDef:
     if self.compression_options is not None:
       oprot.writeFieldBegin('compression_options', TType.MAP, 32)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.compression_options))
-      for kiter110,viter111 in self.compression_options.items():
+      for kiter110,viter111 in list(self.compression_options.items()):
         oprot.writeString(kiter110)
         oprot.writeString(viter111)
       oprot.writeMapEnd()
@@ -2992,7 +2992,7 @@ class CfDef:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -3053,7 +3053,7 @@ class KsDef:
         if ftype == TType.MAP:
           self.strategy_options = {}
           (_ktype114, _vtype115, _size113 ) = iprot.readMapBegin() 
-          for _i117 in xrange(_size113):
+          for _i117 in range(_size113):
             _key118 = iprot.readString();
             _val119 = iprot.readString();
             self.strategy_options[_key118] = _val119
@@ -3069,7 +3069,7 @@ class KsDef:
         if ftype == TType.LIST:
           self.cf_defs = []
           (_etype123, _size120) = iprot.readListBegin()
-          for _i124 in xrange(_size120):
+          for _i124 in range(_size120):
             _elem125 = CfDef()
             _elem125.read(iprot)
             self.cf_defs.append(_elem125)
@@ -3102,7 +3102,7 @@ class KsDef:
     if self.strategy_options is not None:
       oprot.writeFieldBegin('strategy_options', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.strategy_options))
-      for kiter126,viter127 in self.strategy_options.items():
+      for kiter126,viter127 in list(self.strategy_options.items()):
         oprot.writeString(kiter126)
         oprot.writeString(viter127)
       oprot.writeMapEnd()
@@ -3137,7 +3137,7 @@ class KsDef:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -3183,7 +3183,7 @@ class CqlRow:
         if ftype == TType.LIST:
           self.columns = []
           (_etype132, _size129) = iprot.readListBegin()
-          for _i133 in xrange(_size129):
+          for _i133 in range(_size129):
             _elem134 = Column()
             _elem134.read(iprot)
             self.columns.append(_elem134)
@@ -3224,7 +3224,7 @@ class CqlRow:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -3269,7 +3269,7 @@ class CqlMetadata:
         if ftype == TType.MAP:
           self.name_types = {}
           (_ktype137, _vtype138, _size136 ) = iprot.readMapBegin() 
-          for _i140 in xrange(_size136):
+          for _i140 in range(_size136):
             _key141 = iprot.readString();
             _val142 = iprot.readString();
             self.name_types[_key141] = _val142
@@ -3280,7 +3280,7 @@ class CqlMetadata:
         if ftype == TType.MAP:
           self.value_types = {}
           (_ktype144, _vtype145, _size143 ) = iprot.readMapBegin() 
-          for _i147 in xrange(_size143):
+          for _i147 in range(_size143):
             _key148 = iprot.readString();
             _val149 = iprot.readString();
             self.value_types[_key148] = _val149
@@ -3310,7 +3310,7 @@ class CqlMetadata:
     if self.name_types is not None:
       oprot.writeFieldBegin('name_types', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.name_types))
-      for kiter150,viter151 in self.name_types.items():
+      for kiter150,viter151 in list(self.name_types.items()):
         oprot.writeString(kiter150)
         oprot.writeString(viter151)
       oprot.writeMapEnd()
@@ -3318,7 +3318,7 @@ class CqlMetadata:
     if self.value_types is not None:
       oprot.writeFieldBegin('value_types', TType.MAP, 2)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.value_types))
-      for kiter152,viter153 in self.value_types.items():
+      for kiter152,viter153 in list(self.value_types.items()):
         oprot.writeString(kiter152)
         oprot.writeString(viter153)
       oprot.writeMapEnd()
@@ -3348,7 +3348,7 @@ class CqlMetadata:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -3398,7 +3398,7 @@ class CqlResult:
         if ftype == TType.LIST:
           self.rows = []
           (_etype157, _size154) = iprot.readListBegin()
-          for _i158 in xrange(_size154):
+          for _i158 in range(_size154):
             _elem159 = CqlRow()
             _elem159.read(iprot)
             self.rows.append(_elem159)
@@ -3456,7 +3456,7 @@ class CqlResult:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -3508,7 +3508,7 @@ class CqlPreparedResult:
         if ftype == TType.LIST:
           self.variable_types = []
           (_etype164, _size161) = iprot.readListBegin()
-          for _i165 in xrange(_size161):
+          for _i165 in range(_size161):
             _elem166 = iprot.readString();
             self.variable_types.append(_elem166)
           iprot.readListEnd()
@@ -3552,7 +3552,7 @@ class CqlPreparedResult:
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):

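Note that the write paths above wrap the iteration as list(self.credentials.items()) while the __repr__ methods use a bare .items(). The list() call is a conservative, 2to3-style conversion: it snapshots the pairs up front, which only matters if the dict is mutated mid-loop. A small sketch of the distinction:

    creds = {'user': 'cassandra', 'password': 'cassandra'}

    # A plain view is enough for read-only traversal (serialization):
    for k, v in creds.items():
        print(k, v)

    # Snapshot first if the loop may change the dict's size:
    for k, v in list(creds.items()):
        creds.pop(k)
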
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/cdc_test.py
----------------------------------------------------------------------
diff --git a/cdc_test.py b/cdc_test.py
index 657e61a..28b66d1 100644
--- a/cdc_test.py
+++ b/cdc_test.py
@@ -1,26 +1,29 @@
-from __future__ import division
-
 import errno
 import os
 import shutil
 import time
 import uuid
 from collections import namedtuple
-from itertools import izip as zip
+import pytest
+import logging
+
 from itertools import repeat
 
 from cassandra import WriteFailure
 from cassandra.concurrent import (execute_concurrent,
                                   execute_concurrent_with_args)
 from ccmlib.node import Node
-from nose.tools import assert_equal, assert_less_equal
 
-from dtest import Tester, create_ks, debug
+from dtest import Tester, create_ks
 from tools.data import rows_to_list
-from tools.decorators import since
 from tools.files import size_of_files_in_dir
 from tools.funcutils import get_rate_limited_function
 from tools.hacks import advance_to_next_cl_segment
+from tools.assertions import assert_lists_equal_ignoring_order
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
 
 _16_uuid_column_spec = (
     'a uuid PRIMARY KEY, b uuid, c uuid, d uuid, e uuid, f uuid, g uuid, '
@@ -36,10 +39,10 @@ def _insert_rows(session, table_name, insert_stmt, values):
                        concurrency=500, raise_on_first_error=True)
 
     data_loaded = rows_to_list(session.execute('SELECT * FROM ' + table_name))
-    debug('{n} rows inserted into {table_name}'.format(n=len(data_loaded), table_name=table_name))
+    logger.debug('{n} rows inserted into {table_name}'.format(n=len(data_loaded), table_name=table_name))
     # use assert_equal over assert_length_equal to avoid printing out
     # potentially large lists
-    assert_equal(len(values), len(data_loaded))
+    assert len(values) == len(data_loaded)
     return data_loaded
 
 
@@ -48,7 +51,7 @@ def _move_contents(source_dir, dest_dir, verbose=True):
         source_path, dest_path = (os.path.join(source_dir, source_filename),
                                   os.path.join(dest_dir, source_filename))
         if verbose:
-            debug('moving {} to {}'.format(source_path, dest_path))
+            logger.debug('moving {} to {}'.format(source_path, dest_path))
         shutil.move(source_path, dest_path)
 
 
@@ -64,7 +67,7 @@ def _get_16_uuid_insert_stmt(ks_name, table_name):
 
 def _get_create_table_statement(ks_name, table_name, column_spec, options=None):
     if options:
-        options_pairs = ('{k}={v}'.format(k=k, v=v) for (k, v) in options.iteritems())
+        options_pairs = ('{k}={v}'.format(k=k, v=v) for (k, v) in options.items())
         options_string = 'WITH ' + ' AND '.join(options_pairs)
     else:
         options_string = ''
@@ -78,23 +81,21 @@ def _get_create_table_statement(ks_name, table_name, column_spec, options=None):
 def _write_to_cdc_WriteFailure(session, insert_stmt):
     prepared = session.prepare(insert_stmt)
     start, rows_loaded, error_found = time.time(), 0, False
-    rate_limited_debug = get_rate_limited_function(debug, 5)
+    rate_limited_debug_logger = get_rate_limited_function(logger.debug, 5)
     while not error_found:
         # We want to fail if inserting data takes too long. Locally this
         # takes about 10s, but let's be generous.
-        assert_less_equal(
-            (time.time() - start), 600,
-            "It's taken more than 10 minutes to reach a WriteFailure trying "
-            'to overrun the space designated for CDC commitlogs. This could '
-            "be because data isn't being written quickly enough in this "
-            'environment, or because C* is failing to reject writes when '
-            'it should.'
-        )
+        assert (time.time() - start) <= 600, (
+            "It's taken more than 10 minutes to reach a WriteFailure trying "
+            "to overrun the space designated for CDC commitlogs. This could "
+            "be because data isn't being written quickly enough in this "
+            "environment, or because C* is failing to reject writes when "
+            "it should.")
 
         # If we haven't logged from here in the last 5s, do so.
-        rate_limited_debug(
-            '  data load step has lasted {s:.2f}s, '
-            'loaded {r} rows'.format(s=(time.time() - start), r=rows_loaded))
+        rate_limited_debug_logger(
+            '  data load step has lasted {s:.2f}s, '
+            'loaded {r} rows'.format(s=(time.time() - start), r=rows_loaded))
 
         batch_results = list(execute_concurrent(
             session,
@@ -111,13 +112,12 @@ def _write_to_cdc_WriteFailure(session, insert_stmt):
         rows_loaded += len([br for br in batch_results if br[0]])
         # then, we make sure that the only failures are the expected
         # WriteFailures.
-        assert_equal([],
-                     [result for (success, result) in batch_results
-                      if not success and not isinstance(result, WriteFailure)])
+        assert [] == [result for (success, result) in batch_results
+                       if not success and not isinstance(result, WriteFailure)]
         # Finally, if we find a WriteFailure, that means we've inserted all
         # the CDC data we can and so we flip error_found to exit the loop.
         if any(isinstance(result, WriteFailure) for (_, result) in batch_results):
-            debug("write failed (presumably because we've overrun "
+            logger.debug("write failed (presumably because we've overrun "
                   'designated CDC commitlog space) after '
                   'loading {r} rows in {s:.2f}s'.format(
                       r=rows_loaded,
@@ -162,7 +162,7 @@ def _set_cdc_on_table(session, table_name, value, ks_name=None):
     value_string = 'true' if value else 'false'
     stmt = 'ALTER TABLE ' + table_string + ' WITH CDC = ' + value_string
 
-    debug(stmt)
+    logger.debug(stmt)
     session.execute(stmt)
 
 
@@ -212,12 +212,12 @@ class TestCDC(Tester):
         down.
         """
         if verbose:
-            debug('creating ' + dir_name)
+            logger.debug('creating ' + dir_name)
         try:
             os.mkdir(dir_name)
         except OSError as e:
             if e.errno != errno.EEXIST:
-                debug(dir_name + ' already exists. removing and recreating.')
+                logger.debug(dir_name + ' already exists. removing and recreating.')
                 shutil.rmtree(dir_name)
                 os.mkdir(dir_name)
             else:
@@ -225,7 +225,7 @@ class TestCDC(Tester):
 
         def debug_and_rmtree():
             shutil.rmtree(dir_name)
-            debug(dir_name + ' removed')
+            logger.debug(dir_name + ' removed')
 
         self.addCleanup(debug_and_rmtree)
 
@@ -259,8 +259,8 @@ class TestCDC(Tester):
         create_ks(session, ks_name, rf=1)
 
         if table_name is not None:
-            self.assertIsNotNone(cdc_enabled_table, 'if creating a table in prepare, must specify whether or not CDC is enabled on it')
-            self.assertIsNotNone(column_spec, 'if creating a table in prepare, must specify its schema')
+            assert cdc_enabled_table is not None, 'if creating a table in prepare, must specify whether or not CDC is enabled on it'
+            assert column_spec is not None, 'if creating a table in prepare, must specify its schema'
             options = {}
             if gc_grace_seconds is not None:
                 options['gc_grace_seconds'] = gc_grace_seconds
@@ -272,7 +272,7 @@ class TestCDC(Tester):
                 ks_name, table_name, column_spec,
                 options=options
             )
-            debug(stmt)
+            logger.debug(stmt)
             session.execute(stmt)
 
         return node, session
@@ -298,11 +298,11 @@ class TestCDC(Tester):
         execute_concurrent_with_args(session, insert_stmt, data)
 
         # We need data to be in commitlogs, not sstables.
-        self.assertEqual([], list(node.get_sstables(ks_name, table_name)))
+        assert [] == list(node.get_sstables(ks_name, table_name))
 
         for enable in alter_path:
             set_cdc(enable)
-            self.assertItemsEqual(session.execute('SELECT * FROM ' + table_name), data)
+            assert_lists_equal_ignoring_order(session.execute('SELECT * FROM ' + table_name), data)
 
     def test_cdc_enabled_data_readable_on_round_trip(self):
         """
@@ -318,6 +318,7 @@ class TestCDC(Tester):
         """
         self._assert_cdc_data_readable_on_round_trip(start_with_cdc_enabled=False)
 
+    @pytest.mark.skip(reason="Test always fails so skipping until fixed. Tracked with CASSANDRA-14146")
     def test_insertion_and_commitlog_behavior_after_reaching_cdc_total_space(self):
         """
         Test that C* behaves correctly when CDC tables have consumed all the
@@ -345,6 +346,11 @@ class TestCDC(Tester):
             # Make CDC space as small as possible so we can fill it quickly.
             'cdc_total_space_in_mb': 4,
         }
+
+        self.fixture_dtest_setup.ignore_log_patterns = list(self.fixture_dtest_setup.ignore_log_patterns) + [
+            r'org.apache.cassandra.exceptions.CDCWriteException: Rejecting mutation to keyspace ks. '
+            r'Free up space in .* by processing CDC logs']
+
         node, session = self.prepare(
             ks_name=ks_name,
             configuration_overrides=configuration_overrides
@@ -373,26 +379,25 @@ class TestCDC(Tester):
         # Here, we insert values into the first CDC table until we get a
         # WriteFailure. This should happen when the CDC commitlogs take up 1MB
         # or more.
-        debug('flushing non-CDC commitlogs')
+        logger.debug('flushing non-CDC commitlogs')
         node.flush()
         # Then, we insert rows into the CDC table until we can't anymore.
-        debug('beginning data insert to fill CDC commitlogs')
+        logger.debug('beginning data insert to fill CDC commitlogs')
         rows_loaded = _write_to_cdc_WriteFailure(session, full_cdc_table_info.insert_stmt)
 
-        self.assertLess(0, rows_loaded,
-                        'No CDC rows inserted. This may happen when '
-                        'cdc_total_space_in_mb > commitlog_segment_size_in_mb')
+        assert 0 < rows_loaded, 'No CDC rows inserted. ' \
+                                'This may happen when cdc_total_space_in_mb > commitlog_segment_size_in_mb'
 
         commitlog_dir = os.path.join(node.get_path(), 'commitlogs')
         commitlogs_size = size_of_files_in_dir(commitlog_dir)
-        debug('Commitlog dir ({d}) is {b}B'.format(d=commitlog_dir, b=commitlogs_size))
+        logger.debug('Commitlog dir ({d}) is {b}B'.format(d=commitlog_dir, b=commitlogs_size))
 
         # We should get a WriteFailure when trying to write to the CDC table
         # that's filled the designated CDC space...
-        with self.assertRaises(WriteFailure):
+        with pytest.raises(WriteFailure):
             session.execute(full_cdc_table_info.insert_stmt)
         # or any CDC table.
-        with self.assertRaises(WriteFailure):
+        with pytest.raises(WriteFailure):
             session.execute(empty_cdc_table_info.insert_stmt)
 
         # Now we test for behaviors of non-CDC tables when we've exceeded
@@ -422,17 +427,15 @@ class TestCDC(Tester):
         #
         # First, write to non-cdc tables.
         start, time_limit = time.time(), 600
-        rate_limited_debug = get_rate_limited_function(debug, 5)
-        debug('writing to non-cdc table')
+        rate_limited_debug_logger = get_rate_limited_function(logger.debug, 5)
+        logger.debug('writing to non-cdc table')
         # We write until we get a new commitlog segment.
         while _get_commitlog_files(node.get_path()) <= pre_non_cdc_write_segments:
             elapsed = time.time() - start
-            rate_limited_debug('  non-cdc load step has lasted {s:.2f}s'.format(s=elapsed))
-            self.assertLessEqual(
-                elapsed, time_limit,
-                "It's been over a {s}s and we haven't written a new "
-                "commitlog segment. Something is wrong.".format(s=time_limit)
-            )
+            rate_limited_debug_logger('  non-cdc load step has lasted {s:.2f}s'.format(s=elapsed))
+            assert elapsed <= time_limit, \
+                "It's been over a {s}s and we haven't written a new commitlog segment. Something is wrong.".format(s=time_limit)
+
             execute_concurrent(
                 session,
                 ((non_cdc_prepared_insert, ()) for _ in range(1000)),
@@ -443,7 +446,7 @@ class TestCDC(Tester):
         # Finally, we check that draining doesn't move any new segments to cdc_raw:
         node.drain()
         session.cluster.shutdown()
-        self.assertEqual(pre_non_cdc_write_cdc_raw_segments, _get_cdc_raw_files(node.get_path()))
+        assert pre_non_cdc_write_cdc_raw_segments == _get_cdc_raw_files(node.get_path())
 
     def _init_new_loading_node(self, ks_name, create_stmt, use_thrift=False):
         loading_node = Node(
@@ -457,16 +460,16 @@ class TestCDC(Tester):
             initial_token=None,
             binary_interface=('127.0.0.2', 9042)
         )
-        debug('adding node')
+        logger.debug('adding node')
         self.cluster.add(loading_node, is_seed=True)
-        debug('starting new node')
+        logger.debug('starting new node')
         loading_node.start(wait_for_binary_proto=True)
-        debug('recreating ks and table')
+        logger.debug('recreating ks and table')
         loading_session = self.patient_exclusive_cql_connection(loading_node)
         create_ks(loading_session, ks_name, rf=1)
-        debug('creating new table')
+        logger.debug('creating new table')
         loading_session.execute(create_stmt)
-        debug('stopping new node')
+        logger.debug('stopping new node')
         loading_node.stop()
         loading_session.cluster.shutdown()
         return loading_node
@@ -502,9 +505,9 @@ class TestCDC(Tester):
         inserted_rows = _insert_rows(generation_session, cdc_table_info.name, cdc_table_info.insert_stmt, repeat((), 10000))
 
         # drain the node to guarantee all cl segements will be recycled
-        debug('draining')
+        logger.debug('draining')
         generation_node.drain()
-        debug('stopping')
+        logger.debug('stopping')
         # stop the node and clean up all sessions attached to it
         generation_node.stop()
         generation_session.cluster.shutdown()
@@ -515,31 +518,25 @@ class TestCDC(Tester):
         # move cdc_raw contents to commitlog directories, then start the
         # node again to trigger commitlog replay, which should replay the
         # cdc_raw files we moved to commitlogs into memtables.
-        debug('moving cdc_raw and restarting node')
+        logger.debug('moving cdc_raw and restarting node')
         _move_contents(
             os.path.join(generation_node.get_path(), 'cdc_raw'),
             os.path.join(loading_node.get_path(), 'commitlogs')
         )
         loading_node.start(wait_for_binary_proto=True)
-        debug('node successfully started; waiting on log replay')
+        logger.debug('node successfully started; waiting on log replay')
         loading_node.grep_log('Log replay complete')
-        debug('log replay complete')
+        logger.debug('log replay complete')
 
         # final assertions
         validation_session = self.patient_exclusive_cql_connection(loading_node)
         data_in_cdc_table_after_restart = rows_to_list(
             validation_session.execute('SELECT * FROM ' + cdc_table_info.name)
         )
-        debug('found {cdc} values in CDC table'.format(
+        logger.debug('found {cdc} values in CDC table'.format(
             cdc=len(data_in_cdc_table_after_restart)
         ))
         # Then we assert that the CDC data that we expect to be there is there.
         # All data that was in CDC tables should have been copied to cdc_raw,
         # then used in commitlog replay, so it should be back in the cluster.
-        self.assertEqual(
-            inserted_rows,
-            data_in_cdc_table_after_restart,
-            # The message on failure is too long, since cdc_data is thousands
-            # of items, so we print something else here
-            msg='not all expected data selected'
-        )
+        assert inserted_rows == data_in_cdc_table_after_restart, 'not all expected data selected'

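The cdc_test.py changes above collect the recurring migration moves in one place: module-level logging instead of dtest's debug(), plain assert statements instead of unittest/nose helpers, and pytest.raises() instead of self.assertRaises(). The same idioms in miniature, as a standalone sketch:

    import logging
    import pytest

    logger = logging.getLogger(__name__)

    def divide(a, b):
        logger.debug('dividing %s by %s', a, b)
        return a / b

    def test_divide():
        # plain assert replaces assertEqual/assertLess and friends
        assert divide(4, 2) == 2
        # pytest.raises replaces the assertRaises context manager
        with pytest.raises(ZeroDivisionError):
            divide(1, 0)
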
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/cfid_test.py
----------------------------------------------------------------------
diff --git a/cfid_test.py b/cfid_test.py
index 7455902..87625e0 100644
--- a/cfid_test.py
+++ b/cfid_test.py
@@ -1,12 +1,17 @@
 import os
+import logging
 
 from dtest import Tester, create_ks, create_cf
 
+logger = logging.getLogger(__name__)
+
 
 class TestCFID(Tester):
 
-    def cfid_test(self):
-        """ Test through adding/dropping cf's that the path to sstables for each cf are unique and formatted correctly """
+    def test_cfid(self):
+        """ Test through adding/dropping cf's that the path to sstables for each cf are unique
+        and formatted correctly
+        """
         cluster = self.cluster
 
         cluster.populate(1).start(wait_other_notice=True)
@@ -29,8 +34,8 @@ class TestCFID(Tester):
             self.fail("Path to sstables not valid.")
 
         # check that there are 5 unique directories
-        self.assertEqual(len(cfs), 5)
+        assert len(cfs) == 5
 
         # check that these are in fact column family directories
         for dire in cfs:
-            self.assertTrue(dire[0:2] == 'cf')
+            assert dire[0:2] == 'cf'

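The rename from cfid_test to test_cfid is required for collection, not style: pytest's default discovery only picks up functions and methods whose names match test_* (configurable via the python_functions ini option). A sketch of the effect:

    class TestCFID:
        def cfid_test(self):      # not collected; silently never runs
            assert False

        def test_cfid(self):      # collected and executed by pytest
            assert True
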



[29/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/cqlsh_tests/cqlsh_copy_tests.py
----------------------------------------------------------------------
diff --git a/cqlsh_tests/cqlsh_copy_tests.py b/cqlsh_tests/cqlsh_copy_tests.py
index 8501497..9769fd7 100644
--- a/cqlsh_tests/cqlsh_copy_tests.py
+++ b/cqlsh_tests/cqlsh_copy_tests.py
@@ -1,4 +1,3 @@
-# coding: utf-8
 import csv
 import datetime
 import glob
@@ -7,6 +6,9 @@ import os
 import re
 import sys
 import time
+import pytest
+import logging
+
 from collections import namedtuple
 from contextlib import contextmanager
 from decimal import Decimal
@@ -22,15 +24,17 @@ from cassandra.murmur3 import murmur3
 from cassandra.util import SortedSet
 from ccmlib.common import is_win
 
-from cqlsh_tools import (DummyColorMap, assert_csvs_items_equal, csv_rows,
+from .cqlsh_tools import (DummyColorMap, assert_csvs_items_equal, csv_rows,
                          monkeypatch_driver, random_list, unmonkeypatch_driver,
                          write_rows_to_csv)
-from dtest import (DISABLE_VNODES, Tester, debug, warning, create_ks)
+from dtest import (Tester, create_ks)
 from tools.data import rows_to_list
-from tools.decorators import since
 from tools.metadata_wrapper import (UpdatingClusterMetadataWrapper,
                                     UpdatingTableMetadataWrapper)
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
 PARTITIONERS = {
     "murmur3": "org.apache.cassandra.dht.Murmur3Partitioner",
     "random": "org.apache.cassandra.dht.RandomPartitioner",
@@ -54,7 +58,7 @@ class UTC(datetime.tzinfo):
         return datetime.timedelta(0)
 
 
-class CqlshCopyTest(Tester):
+class TestCqlshCopy(Tester):
     """
     Tests the COPY TO and COPY FROM features in cqlsh.
     @jira_ticket CASSANDRA-3906
@@ -74,7 +78,7 @@ class CqlshCopyTest(Tester):
 
     def tearDown(self):
         self.delete_temp_files()
-        super(CqlshCopyTest, self).tearDown()
+        super(TestCqlshCopy, self).tearDown()
 
     def get_temp_file(self, prefix=template, suffix=""):
         """
@@ -114,9 +118,9 @@ class CqlshCopyTest(Tester):
                 self.cluster.set_configuration_options(values=configuration_options)
             self.cluster.populate(nodes, tokens=tokens).start(wait_for_binary_proto=True)
         else:
-            self.assertEqual(self.cluster.partitioner, p, "Cannot reuse cluster: different partitioner")
-            self.assertEqual(len(self.cluster.nodelist()), nodes, "Cannot reuse cluster: different number of nodes")
-            self.assertIsNone(configuration_options)
+            assert self.cluster.partitioner == p, "Cannot reuse cluster: different partitioner"
+            assert len(self.cluster.nodelist()) == nodes, "Cannot reuse cluster: different number of nodes"
+            assert configuration_options is None
 
         self.node1 = self.cluster.nodelist()[0]
 
@@ -185,8 +189,8 @@ class CqlshCopyTest(Tester):
             ret = self.node1.run_cqlsh(cmds=cmds, cqlsh_options=cqlsh_options)
 
         if show_output:
-            debug('Output:\n{}'.format(ret[0]))  # show stdout of copy cmd
-            debug('Errors:\n{}'.format(ret[1]))  # show stderr of copy cmd
+            logger.debug('Output:\n{}'.format(ret[0]))  # show stdout of copy cmd
+            logger.debug('Errors:\n{}'.format(ret[1]))  # show stderr of copy cmd
 
         return ret
 
@@ -268,7 +272,7 @@ class CqlshCopyTest(Tester):
             Return a quoted string representation for strings, unicode and date time parameters,
             otherwise return a string representation of the parameter.
             """
-            return "'{}'".format(s) if isinstance(s, (str, unicode, Datetime)) else str(s)
+            return "'{}'".format(s) if isinstance(s, (str, Datetime)) else str(s)
 
         class ImmutableDict(frozenset):
             iteritems = frozenset.__iter__
@@ -378,31 +382,31 @@ class CqlshCopyTest(Tester):
 
         self.maxDiff = None
         try:
-            self.assertItemsEqual(csv_results, processed_results)
+            assert csv_results == processed_results
         except Exception as e:
             if len(csv_results) != len(processed_results):
-                warning("Different # of entries. CSV: " + str(len(csv_results)) +
+                logger.warning("Different # of entries. CSV: " + str(len(csv_results)) +
                         " vs results: " + str(len(processed_results)))
             elif csv_results[0] is not None:
                 for x in range(0, len(csv_results[0])):
                     if csv_results[0][x] != processed_results[0][x]:
-                        warning("Mismatch at index: " + str(x))
-                        warning("Value in csv: " + str(csv_results[0][x]))
-                        warning("Value in result: " + str(processed_results[0][x]))
+                        logger.warning("Mismatch at index: " + str(x))
+                        logger.warning("Value in csv: " + str(csv_results[0][x]))
+                        logger.warning("Value in result: " + str(processed_results[0][x]))
             raise e
 
     def make_csv_formatter(self, time_format, nullval):
         with self._cqlshlib() as cqlshlib:  # noqa
             from cqlshlib.formatting import format_value, format_value_default
             from cqlshlib.displaying import NO_COLOR_MAP
-            try:
-                from cqlshlib.formatting import DateTimeFormat
-                date_time_format = DateTimeFormat()
-                date_time_format.timestamp_format = time_format
-                if hasattr(date_time_format, 'milliseconds_only'):
-                    date_time_format.milliseconds_only = True
-            except ImportError:
-                date_time_format = None
+        try:
+            from cqlshlib.formatting import DateTimeFormat
+            date_time_format = DateTimeFormat()
+            date_time_format.timestamp_format = time_format
+            if hasattr(date_time_format, 'milliseconds_only'):
+                date_time_format.milliseconds_only = True
+        except ImportError:
+            date_time_format = None
 
         encoding_name = 'utf-8'  # codecs.lookup(locale.getpreferredencoding()).name
         color_map = DummyColorMap()
@@ -424,7 +428,7 @@ class CqlshCopyTest(Tester):
                 format_fn = format_value
 
             if val is None or val == EMPTY or val == nullval:
-                return format_value_default(nullval, colormap=NO_COLOR_MAP)
+                return format_value_default(nullval)
 
             # CASSANDRA-11255 increased COPY TO DOUBLE PRECISION TO 12
             if cql_type_name == 'double' and self.cluster.version() >= LooseVersion('3.6'):
@@ -499,7 +503,7 @@ class CqlshCopyTest(Tester):
         results = list(self.session.execute("SELECT * FROM testlist"))
 
         tempfile = self.get_temp_file()
-        debug('Exporting to csv file: {name}'.format(name=tempfile.name))
+        logger.debug('Exporting to csv file: {name}'.format(name=tempfile.name))
         self.run_cqlsh(cmds="COPY ks.testlist TO '{name}'".format(name=tempfile.name))
 
         self.assertCsvResultEqual(tempfile.name, results, 'testlist')
@@ -526,7 +530,7 @@ class CqlshCopyTest(Tester):
         results = list(self.session.execute("SELECT * FROM testtuple"))
 
         tempfile = self.get_temp_file()
-        debug('Exporting to csv file: {name}'.format(name=tempfile.name))
+        logger.debug('Exporting to csv file: {name}'.format(name=tempfile.name))
         self.run_cqlsh(cmds="COPY ks.testtuple TO '{name}'".format(name=tempfile.name))
 
         self.assertCsvResultEqual(tempfile.name, results, 'testtuple')
@@ -554,7 +558,7 @@ class CqlshCopyTest(Tester):
         results = list(self.session.execute("SELECT * FROM testdelimiter"))
 
         tempfile = self.get_temp_file()
-        debug('Exporting to csv file: {name}'.format(name=tempfile.name))
+        logger.debug('Exporting to csv file: {name}'.format(name=tempfile.name))
         cmds = "COPY ks.testdelimiter TO '{name}'".format(name=tempfile.name)
         cmds += " WITH DELIMITER = '{d}'".format(d=delimiter)
         self.run_cqlsh(cmds=cmds)
@@ -617,13 +621,13 @@ class CqlshCopyTest(Tester):
             copy_from_options['NULL'] = indicator
 
         tempfile = self.get_temp_file()
-        debug('Exporting to csv file: {name}'.format(name=tempfile.name))
+        logger.debug('Exporting to csv file: {name}'.format(name=tempfile.name))
         cmds = "COPY ks.testnullindicator TO '{name}'".format(name=tempfile.name)
         if indicator:
             cmds += " WITH NULL = '{d}'".format(d=indicator)
-        debug(cmds)
+        logger.debug(cmds)
         out, _, _ = self.run_cqlsh(cmds=cmds)
-        debug(out)
+        logger.debug(out)
 
         results = list(self.session.execute("SELECT * FROM ks.testnullindicator"))
         results_with_null_indicator = [[indicator if value is None else value for value in row] for row in results]
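
The comprehension above normalises the query results before they are
compared with the exported csv: every None cell becomes the configured null
indicator, mirroring what COPY TO writes out. In isolation (a standalone
sketch) the transformation is:

    indicator = 'null'
    rows = [[1, None, 'x'], [2, 'y', None]]
    # Replace None cells with the indicator, as COPY TO does on export.
    rows_with_indicator = [[indicator if value is None else value for value in row]
                           for row in rows]
    assert rows_with_indicator == [[1, 'null', 'x'], [2, 'y', 'null']]
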
@@ -632,18 +636,18 @@ class CqlshCopyTest(Tester):
 
         # Now import back the csv file
         self.session.execute('TRUNCATE ks.testnullindicator')
-        debug('Importing from csv file: {name}'.format(name=tempfile.name))
+        logger.debug('Importing from csv file: {name}'.format(name=tempfile.name))
         cmds = "COPY ks.testnullindicator FROM '{name}'".format(name=tempfile.name)
         if copy_from_options:
             first = True
-            for k, v in copy_from_options.iteritems():
+            for k, v in copy_from_options.items():
                 cmds += ' {} {} = {}'.format('WITH' if first else 'AND', k, v)
                 first = False
-        debug(cmds)
+        logger.debug(cmds)
         self.run_cqlsh(cmds=cmds)
 
         results_imported = list(self.session.execute("SELECT * FROM ks.testnullindicator"))
-        self.assertEquals(results, results_imported)
+        assert results == results_imported
 
     def test_default_null_indicator(self):
         """
@@ -701,12 +705,12 @@ class CqlshCopyTest(Tester):
             writer.writerow({'a': 1, 'b': "[1,2,3]"})
             writer.writerow({'a': 2, 'b': "[1,,3]"})
 
-        debug('Importing from csv file: {name}'.format(name=tempfile.name))
+        logger.debug('Importing from csv file: {name}'.format(name=tempfile.name))
         cmds = "COPY ks.testnullvalsincollections FROM '{name}'".format(name=tempfile.name)
         out, err, _ = self.run_cqlsh(cmds=cmds)
-        debug(out)
-        debug(err)
-        self.assertIn("ParseError - Failed to parse [1,,3] : Empty values are not allowed", err)
+        logger.debug(out)
+        logger.debug(err)
+        assert "ParseError - Failed to parse [1,,3] : Empty values are not allowed" in err
 
     def test_writing_use_header(self):
         """
@@ -729,7 +733,7 @@ class CqlshCopyTest(Tester):
         execute_concurrent_with_args(self.session, insert_statement, args)
 
         tempfile = self.get_temp_file()
-        debug('Exporting to csv file: {name}'.format(name=tempfile.name))
+        logger.debug('Exporting to csv file: {name}'.format(name=tempfile.name))
         cmds = "COPY ks.testheader TO '{name}'".format(name=tempfile.name)
         cmds += " WITH HEADER = true"
         self.run_cqlsh(cmds=cmds)
@@ -737,8 +741,7 @@ class CqlshCopyTest(Tester):
         with open(tempfile.name, 'r') as csvfile:
             csv_values = list(csv.reader(csvfile))
 
-        self.assertItemsEqual(csv_values,
-                              [['a', 'b'], ['1', '10'], ['2', '20'], ['3', '30']])
+        assert csv_values == [['a', 'b'], ['1', '10'], ['2', '20'], ['3', '30']]
 
     def _test_reading_counter_template(self, copy_options=None):
         """
@@ -772,14 +775,14 @@ class CqlshCopyTest(Tester):
         cmds = "COPY ks.testcounter FROM '{name}'".format(name=tempfile.name)
         cmds += " WITH HEADER = true"
         if copy_options:
-            for opt, val in copy_options.iteritems():
+            for opt, val in copy_options.items():
                 cmds += " AND {} = {}".format(opt, val)
 
-        debug("Running {}".format(cmds))
+        logger.debug("Running {}".format(cmds))
         self.run_cqlsh(cmds=cmds)
 
         result = self.session.execute("SELECT * FROM testcounter")
-        self.assertItemsEqual(data, rows_to_list(result))
+        assert data == rows_to_list(result)
 
     def test_reading_counter(self):
         """
@@ -830,8 +833,7 @@ class CqlshCopyTest(Tester):
         self.run_cqlsh(cmds=cmds)
 
         result = self.session.execute("SELECT * FROM testheader")
-        self.assertItemsEqual([tuple(d) for d in data],
-                              [tuple(r) for r in rows_to_list(result)])
+        assert [tuple(d) for d in data] == [tuple(r) for r in rows_to_list(result)]
 
     def test_datetimeformat_round_trip(self):
         """
@@ -864,7 +866,7 @@ class CqlshCopyTest(Tester):
         format = '%Y/%m/%d %H:%M'
 
         tempfile = self.get_temp_file()
-        debug('Exporting to csv file: {name}'.format(name=tempfile.name))
+        logger.debug('Exporting to csv file: {name}'.format(name=tempfile.name))
         cmds = "COPY ks.testdatetimeformat TO '{name}'".format(name=tempfile.name)
         cmds += " WITH DATETIMEFORMAT = '{}'".format(format)
         self.run_cqlsh(cmds=cmds)
@@ -872,10 +874,9 @@ class CqlshCopyTest(Tester):
         with open(tempfile.name, 'r') as csvfile:
             csv_values = list(csv.reader(csvfile))
 
-        self.assertItemsEqual(csv_values,
-                              [['1', '2015/01/01 07:00'],
+        assert csv_values == [['1', '2015/01/01 07:00'],
                                ['2', '2015/06/10 12:30'],
-                               ['3', '2015/12/31 23:59']])
+                               ['3', '2015/12/31 23:59']]
 
         self.session.execute("TRUNCATE testdatetimeformat")
         cmds = "COPY ks.testdatetimeformat FROM '{name}'".format(name=tempfile.name)
@@ -888,8 +889,8 @@ class CqlshCopyTest(Tester):
         cql_type_names = [table_meta.columns[c].cql_type for c in table_meta.columns]
 
         imported_results = list(self.session.execute("SELECT * FROM testdatetimeformat"))
-        self.assertItemsEqual(self.result_to_csv_rows(exported_results, cql_type_names, time_format=format),
-                              self.result_to_csv_rows(imported_results, cql_type_names, time_format=format))
+        assert self.result_to_csv_rows(exported_results, cql_type_names, time_format=format) \
+               == self.result_to_csv_rows(imported_results, cql_type_names, time_format=format)
 
     @since('3.2')
     def test_reading_with_ttl(self):
@@ -922,12 +923,12 @@ class CqlshCopyTest(Tester):
         self.run_cqlsh(cmds="COPY ks.testttl FROM '{name}' WITH TTL = '5'".format(name=tempfile.name))
 
         result = rows_to_list(self.session.execute("SELECT * FROM testttl"))
-        self.assertItemsEqual(data, result)
+        assert data == result
 
         time.sleep(10)
 
         result = rows_to_list(self.session.execute("SELECT * FROM testttl"))
-        self.assertItemsEqual([], result)
+        assert [] == result
 
     def test_reading_with_skip_and_max_rows(self):
         """
@@ -946,18 +947,18 @@ class CqlshCopyTest(Tester):
         stress_table = 'keyspace1.standard1'
         num_file_rows = 10000
 
-        debug('Running stress to generate a large CSV via COPY TO')
+        logger.debug('Running stress to generate a large CSV via COPY TO')
         self.node1.stress(['write', 'n={}'.format(num_file_rows), 'no-warmup', '-rate', 'threads=50'])
         self.run_cqlsh(cmds="COPY {} TO '{}'".format(stress_table, tempfile.name))
-        self.assertEqual(num_file_rows, len(open(tempfile.name).readlines()))
+        assert num_file_rows == len(open(tempfile.name).readlines())
 
         def do_test(num_rows, skip_rows):
-            debug('Preparing to test {} max rows and {} skip rows by truncating table'.format(num_rows, skip_rows))
+            logger.debug('Preparing to test {} max rows and {} skip rows by truncating table'.format(num_rows, skip_rows))
             self.session.execute("TRUNCATE {}".format(stress_table))
             result = rows_to_list(self.session.execute("SELECT * FROM {}".format(stress_table)))
-            self.assertItemsEqual([], result)
+            assert [] == result
 
-            debug('Importing {} rows'.format(num_rows))
+            logger.debug('Importing {} rows'.format(num_rows))
             self.run_cqlsh(cmds="COPY {} FROM '{}' WITH MAXROWS = '{}' AND SKIPROWS='{}'"
                            .format(stress_table, tempfile.name, num_rows, skip_rows))
 
@@ -965,7 +966,7 @@ class CqlshCopyTest(Tester):
             expected_rows -= min(num_file_rows, max(0, skip_rows))
-            self.assertEqual([[expected_rows]],
-                             rows_to_list(self.session.execute("SELECT COUNT(*) FROM {}".format(stress_table))))
+            assert [[expected_rows]] == \
+                rows_to_list(self.session.execute("SELECT COUNT(*) FROM {}".format(stress_table)))
-            debug('Imported {} as expected'.format(expected_rows))
+            logger.debug('Imported {} as expected'.format(expected_rows))
 
         # max rows tests
         do_test(-1, 0)
@@ -1014,11 +1015,11 @@ class CqlshCopyTest(Tester):
 
         def do_test(skip_cols, expected_results):
             self.session.execute('TRUNCATE ks.testskipcols')
-            debug("Importing csv file {} with skipcols '{}'".format(tempfile, skip_cols))
+            logger.debug("Importing csv file {} with skipcols '{}'".format(tempfile, skip_cols))
             out, err, _ = self.run_cqlsh(cmds="COPY ks.testskipcols FROM '{}' WITH SKIPCOLS = '{}'"
                                          .format(tempfile.name, skip_cols))
-            debug(out)
-            self.assertItemsEqual(expected_results, rows_to_list(self.session.execute("SELECT * FROM ks.testskipcols")))
+            logger.debug(out)
+            assert expected_results == rows_to_list(self.session.execute("SELECT * FROM ks.testskipcols"))
 
         do_test('c, d ,e', [[1, 2, None, None, None], [6, 7, None, None, None]])
         do_test('b,', [[1, None, 3, 4, 5], [6, None, 8, 9, 10]])
@@ -1062,11 +1063,11 @@ class CqlshCopyTest(Tester):
                 writer.writerow({'a': a, 'b': b, 'c': c, 'd': d, 'e': e})
 
         def do_test(skip_cols, expected_results):
-            debug("Importing csv file {} with skipcols '{}'".format(tempfile, skip_cols))
+            logger.debug("Importing csv file {} with skipcols '{}'".format(tempfile, skip_cols))
             out, err, _ = self.run_cqlsh(cmds="COPY ks.testskipcols FROM '{}' WITH SKIPCOLS = '{}'"
                                          .format(tempfile.name, skip_cols))
-            debug(out)
-            self.assertItemsEqual(expected_results, rows_to_list(self.session.execute("SELECT * FROM ks.testskipcols")))
+            logger.debug(out)
+            assert expected_results == rows_to_list(self.session.execute("SELECT * FROM ks.testskipcols"))
 
         do_test('c, d ,e', [[1, 1, None, None, None], [2, 1, None, None, None]])
         do_test('b', [[1, 1, 1, 1, 1], [2, 1, 1, 1, 1]])
@@ -1098,10 +1099,10 @@ class CqlshCopyTest(Tester):
         self.session.execute("CREATE TABLE testtokens(a text primary key)")
 
         insert_statement = self.session.prepare("INSERT INTO testtokens (a) VALUES (?)")
-        execute_concurrent_with_args(self.session, insert_statement, [(str(i),) for i in xrange(num_records)])
+        execute_concurrent_with_args(self.session, insert_statement, [(str(i),) for i in range(num_records)])
 
         tempfile = self.get_temp_file()
-        debug('Exporting tokens {} - {} for {} records to csv file: {}'.format(begin_token, end_token,
+        logger.debug('Exporting tokens {} - {} for {} records to csv file: {}'.format(begin_token, end_token,
                                                                                num_records, tempfile.name))
         cmds = "COPY ks.testtokens TO '{}'".format(tempfile.name)
         if begin_token and end_token:
@@ -1111,10 +1112,10 @@ class CqlshCopyTest(Tester):
         elif end_token:
             cmds += "WITH ENDTOKEN = '{}'".format(end_token)
 
-        debug(cmds)
+        logger.debug(cmds)
         out, err, _ = self.run_cqlsh(cmds=cmds)
-        debug(err)
-        debug(out)
+        logger.debug(err)
+        logger.debug(out)
 
         max_long = 2 ** 63 - 1
         min_long = -max_long - 1
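
The two constants above are the Murmur3Partitioner token bounds: tokens are
signed 64-bit integers, so an omitted BEGINTOKEN or ENDTOKEN defaults to one
end of the full range. A sketch of the selection the test expects COPY TO to
honour, where token_of stands in for the murmur3 helper used above:

    def rows_in_token_range(keys, token_of, begin_token=None, end_token=None):
        # Keep keys whose token falls in the inclusive range; the defaults
        # cover the whole signed 64-bit token space.
        begin_token = -2 ** 63 if begin_token is None else begin_token
        end_token = 2 ** 63 - 1 if end_token is None else end_token
        return sorted((key, token_of(key)) for key in keys
                      if begin_token <= token_of(key) <= end_token)

    assert rows_in_token_range(['a', 'b'], ord) == [('a', 97), ('b', 98)]
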
@@ -1123,13 +1124,13 @@ class CqlshCopyTest(Tester):
         if not end_token:
             end_token = max_long
 
-        tokens = [murmur3(str(i)) for i in xrange(num_records)]
-        result = sorted([(str(i), tokens[i]) for i in xrange(num_records) if begin_token <= tokens[i] <= end_token])
+        tokens = [murmur3(str(i)) for i in range(num_records)]
+        result = sorted([(str(i), tokens[i]) for i in range(num_records) if begin_token <= tokens[i] <= end_token])
 
         with open(tempfile.name, 'r') as csvfile:
             csv_values = sorted([(v[0], tokens[int(v[0])]) for v in csv.reader(csvfile)])
 
-        self.assertItemsEqual(csv_values, result)
+        assert csv_values == result
 
     def test_reading_max_parse_errors(self):
         """
@@ -1158,20 +1159,20 @@ class CqlshCopyTest(Tester):
 
         with open(tempfile.name, 'w') as csvfile:
             writer = csv.DictWriter(csvfile, fieldnames=['a', 'b', 'c'])
-            for i in xrange(num_rows):
+            for i in range(num_rows):
                 if i % 2 == 0:
                     writer.writerow({'a': i, 'b': 0, 'c': 'abc'})  # invalid
                 else:
                     writer.writerow({'a': i, 'b': 0, 'c': 2.0})  # valid
 
-        debug("Importing csv file {} with {} max parse errors".format(tempfile.name, max_parse_errors))
+        logger.debug("Importing csv file {} with {} max parse errors".format(tempfile.name, max_parse_errors))
         out, err, _ = self.run_cqlsh(cmds="COPY ks.testmaxparseerrors FROM '{}' WITH MAXPARSEERRORS='{}'"
                                      .format(tempfile.name, max_parse_errors))
 
-        self.assertIn('Exceeded maximum number of parse errors {}'.format(max_parse_errors), err)
+        assert 'Exceeded maximum number of parse errors {}'.format(max_parse_errors) in err
         num_rows_imported = rows_to_list(self.session.execute("SELECT COUNT(*) FROM ks.testmaxparseerrors"))[0][0]
-        debug("Imported {} rows".format(num_rows_imported))
-        self.assertTrue(num_rows_imported < (num_rows / 2))  # less than the maximum number of valid rows in the csv
+        logger.debug("Imported {} rows".format(num_rows_imported))
+        assert num_rows_imported < (num_rows / 2)  # less than the maximum number of valid rows in the csv
 
     def test_reading_max_insert_errors(self):
         """
@@ -1203,7 +1204,7 @@ class CqlshCopyTest(Tester):
 
         with open(tempfile.name, 'w') as csvfile:
             writer = csv.DictWriter(csvfile, fieldnames=['a', 'b', 'c'])
-            for i in xrange(num_rows):
+            for i in range(num_rows):
                 writer.writerow({'a': i, 'b': 0, 'c': 2.0})
 
         failures = {'failing_batch': {'id': 3, 'failures': 2}}
@@ -1213,7 +1214,7 @@ class CqlshCopyTest(Tester):
             self.session.execute("TRUNCATE ks.testmaxinserterrors")
             num_expected_rows = num_rows - chunk_size  # one chunk will fail
 
-            debug("Importing csv file {} with {} max insert errors and chunk size {}"
+            logger.debug("Importing csv file {} with {} max insert errors and chunk size {}"
                   .format(tempfile.name, max_insert_errors, chunk_size))
             # Note: we use one attempt because each attempt counts as a failure
             out, err, _ = self.run_cqlsh(cmds="COPY ks.testmaxinserterrors FROM '{}' WITH MAXINSERTERRORS='{}' "
@@ -1221,15 +1222,14 @@ class CqlshCopyTest(Tester):
                                          .format(tempfile.name, max_insert_errors, chunk_size))
 
             num_rows_imported = rows_to_list(self.session.execute("SELECT COUNT(*) FROM ks.testmaxinserterrors"))[0][0]
-            debug("Imported {}".format(num_rows_imported))
+            logger.debug("Imported {}".format(num_rows_imported))
             if max_insert_errors < chunk_size:
-                self.assertIn('Exceeded maximum number of insert errors {}'.format(max_insert_errors), err)
-                self.assertTrue(num_rows_imported <= num_expected_rows,
-                                "{} < {}".format(num_rows_imported, num_expected_rows))
+                assert 'Exceeded maximum number of insert errors {}'.format(max_insert_errors) in err
+                assert num_rows_imported <= num_expected_rows, "{} <= {}".format(num_rows_imported, num_expected_rows)
             else:
-                self.assertNotIn('Exceeded maximum number of insert errors {}'.format(max_insert_errors), err)
-                self.assertIn('Failed to process {} rows'.format(chunk_size), err)
-                self.assertEquals(num_expected_rows, num_rows_imported)
+                assert 'Exceeded maximum number of insert errors {}'.format(max_insert_errors) not in err
+                assert 'Failed to process {} rows'.format(chunk_size) in err
+                assert num_expected_rows == num_rows_imported
 
         do_test(50, 100)
         do_test(100, 50)
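
The two calls above sit on either side of the boundary this test reasons
about: exactly one chunk is instrumented to fail, and every row in a failed
chunk counts against MAXINSERTERRORS. The arithmetic, reduced to a sketch
(expect is hypothetical and mirrors the assertions in do_test):

    def expect(num_rows, max_insert_errors, chunk_size):
        # One chunk is forced to fail; each of its rows is one insert error.
        if max_insert_errors < chunk_size:
            return 'aborted'              # error budget exhausted mid-chunk
        return num_rows - chunk_size      # run completes, minus the failed chunk

    assert expect(1000, 50, 100) == 'aborted'   # do_test(50, 100)
    assert expect(1000, 100, 50) == 950         # do_test(100, 50)
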
@@ -1262,8 +1262,8 @@ class CqlshCopyTest(Tester):
             valid_rows = []
             with open(tempfile.name, 'w') as csvfile:
                 writer = csv.DictWriter(csvfile, fieldnames=['a', 'b', 'c'])
-                for i in xrange(num_chunks):
-                    for k in xrange(chunk_size):
+                for i in range(num_chunks):
+                    for k in range(chunk_size):
                         if k < num_failing_per_chunk:  # invalid
                             if i == 0 and k == 0:  # fail on a primary key (only once)
                                 writer.writerow({'a': 'bb', 'b': k, 'c': 1.0})
@@ -1278,18 +1278,18 @@ class CqlshCopyTest(Tester):
             err_file_name = err_file.name if err_file else 'import_ks_testparseerrors.err'
             self.session.execute("TRUNCATE testparseerrors")
 
-            debug("Importing csv file {} with err_file {} and {}/{}/{}"
+            logger.debug("Importing csv file {} with err_file {} and {}/{}/{}"
                   .format(tempfile.name, err_file_name, num_chunks, chunk_size, num_failing_per_chunk))
             cmd = "COPY ks.testparseerrors FROM '{}' WITH CHUNKSIZE={}".format(tempfile.name, chunk_size)
             if err_file:
                 cmd += " AND ERRFILE='{}'".format(err_file.name)
             self.run_cqlsh(cmds=cmd)
 
-            debug('Sorting')
+            logger.debug('Sorting')
             results = sorted(rows_to_list(self.session.execute("SELECT * FROM ks.testparseerrors")))
-            debug('Checking valid rows')
-            self.assertItemsEqual(valid_rows, results)
-            debug('Checking invalid rows')
+            logger.debug('Checking valid rows')
+            assert valid_rows == results
+            logger.debug('Checking invalid rows')
             self.assertCsvResultEqual(err_file_name, invalid_rows, cql_type_names=['text', 'int', 'text'])
 
         do_test(100, 2, 1, self.get_temp_file())
@@ -1334,19 +1334,19 @@ class CqlshCopyTest(Tester):
             invalid_rows.append([0, 0, 1.0])
 
             writer = csv.DictWriter(csvfile, fieldnames=['a', 'b', 'c', 'd', 'e'])
-            for i in xrange(1, 100):
+            for i in range(1, 100):
                 writer.writerow({'a': i, 'b': i, 'c': 2.0, 'd': 3.0, 'e': 4.0})
                 valid_rows.append([i, i, 2.0, 3.0, 4.0])
 
-        debug("Importing csv file {} with err_file {}".format(tempfile.name, err_file.name))
+        logger.debug("Importing csv file {} with err_file {}".format(tempfile.name, err_file.name))
         cmd = "COPY ks.testwrongnumcols FROM '{}' WITH ERRFILE='{}'".format(tempfile.name, err_file.name)
         self.run_cqlsh(cmds=cmd)
 
-        debug('Sorting')
+        logger.debug('Sorting')
         results = sorted(rows_to_list(self.session.execute("SELECT * FROM ks.testwrongnumcols")))
-        debug('Checking valid rows')
-        self.assertItemsEqual(valid_rows, results)
-        debug('Checking invalid rows')
+        logger.debug('Checking valid rows')
+        assert valid_rows == results
+        logger.debug('Checking invalid rows')
         self.assertCsvResultEqual(err_file.name, invalid_rows, 'testwrongnumcols', columns=['a', 'b', 'e'])
 
         os.unlink(err_file.name)
@@ -1375,19 +1375,19 @@ class CqlshCopyTest(Tester):
         num_files = 10
         tempfiles = []
 
-        for i in xrange(num_files):
+        for i in range(num_files):
             tempfiles.append(self.get_temp_file(prefix='testreadmult{}'.format(i), suffix='.csv'))
 
-        for i in xrange(num_files):
+        for i in range(num_files):
             with open(tempfiles[i].name, 'w') as csvfile:
                 writer = csv.DictWriter(csvfile, fieldnames=['a', 'b', 'c'])
-                for k in xrange(num_rows_per_file):
+                for k in range(num_rows_per_file):
                     writer.writerow({'a': i, 'b': k, 'c': 2.0})
 
         def import_and_check(temp_files_str):
             self.session.execute("TRUNCATE testmultifiles")
 
-            debug("Importing csv files {}".format(temp_files_str))
+            logger.debug("Importing csv files {}".format(temp_files_str))
             self.run_cqlsh(cmds="COPY ks.testmultifiles FROM '{}'".format(temp_files_str))
 
             self.assertEqual([[num_rows_per_file * len(tempfiles)]],
@@ -1417,7 +1417,7 @@ class CqlshCopyTest(Tester):
 
         def do_test(max_size, header):
             tempfile = self.get_temp_file(prefix='testwritemult', suffix='.csv')
-            debug('Exporting to csv file: {} with max size {} and header {}'
+            logger.debug('Exporting to csv file: {} with max size {} and header {}'
                   .format(tempfile.name, max_size, header))
             cmd = "COPY {} TO '{}' WITH MAXOUTPUTSIZE='{}'".format(stress_table, tempfile.name, max_size)
             if header:
@@ -1431,15 +1431,15 @@ class CqlshCopyTest(Tester):
                 os.unlink(f)
 
-            num_expected_files = num_records / max_size if num_records % max_size == 0 else (num_records / max_size + 1)
+            num_expected_files = num_records // max_size if num_records % max_size == 0 else (num_records // max_size + 1)
-            self.assertEquals(num_expected_files, len(output_files))
-            self.assertEquals(num_records + 1 if header else num_records, sum(num_lines))
+            assert num_expected_files == len(output_files)
+            assert (num_records + 1 if header else num_records) == sum(num_lines)
 
             for i, n in enumerate(sorted(num_lines, reverse=True)):
-                if i < num_records / max_size:
+                if i < num_records // max_size:
                     num_expected_lines = max_size + 1 if i == 0 and header else max_size
-                    self.assertEquals(num_expected_lines, n)
+                    assert num_expected_lines == n
                 else:
-                    self.assertEquals(num_records % max_size, n)
+                    assert num_records % max_size == n
 
         do_test(1000, False)
         do_test(1000, True)
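
The parenthesised conditional in the assertion above matters: written as
assert x + 1 if header else x == y, the conditional expression binds last,
so the statement would assert the always-truthy x + 1 whenever a header is
requested. The splitting arithmetic being checked, as a standalone sketch:

    def expected_split(num_records, max_size, header):
        # MAXOUTPUTSIZE caps rows per output file; a header, if requested,
        # is only written to the first file.
        num_files = num_records // max_size + (0 if num_records % max_size == 0 else 1)
        total_lines = num_records + (1 if header else 0)
        return num_files, total_lines

    assert expected_split(10000, 1000, header=True) == (10, 10001)
    assert expected_split(10500, 1000, header=False) == (11, 10500)
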
@@ -1662,7 +1662,7 @@ class CqlshCopyTest(Tester):
             self.session.execute("TRUNCATE testvalidate")
 
             tempfile = self.get_temp_file()
-            debug('Writing {}'.format(tempfile.name))
+            logger.debug('Writing {}'.format(tempfile.name))
             write_rows_to_csv(tempfile.name, data)
 
             cmd = """COPY ks.testvalidate (a, b, c) FROM '{name}'""".format(name=tempfile.name)
@@ -1672,10 +1672,10 @@ class CqlshCopyTest(Tester):
             results = list(self.session.execute("SELECT * FROM testvalidate"))
 
             if expected_err:
-                self.assertIn(expected_err, err)
-                self.assertFalse(results)
+                assert expected_err in err
+                assert not results
             else:
-                self.assertFalse(err)
+                assert not err
                 self.assertCsvResultEqual(tempfile.name, results, 'testvalidate')
 
     @since('2.2')
@@ -1700,18 +1700,18 @@ class CqlshCopyTest(Tester):
             )""")
 
         tempfile = self.get_temp_file()
-        debug('Writing {}'.format(tempfile.name))
+        logger.debug('Writing {}'.format(tempfile.name))
         write_rows_to_csv(tempfile.name, [[1, 1, 1]])
 
         cmd = """COPY ks.testwrongcolumns (a, b, d) FROM '{}'""".format(tempfile.name)
         out, err, _ = self.run_cqlsh(cmd)
-        debug(out)
-        debug(err)
+        logger.debug(out)
+        logger.debug(err)
         results = list(self.session.execute("SELECT * FROM testwrongcolumns"))
 
-        self.assertIn('Invalid column name d', err)
-        self.assertNotIn('child process(es) died unexpectedly', err)
-        self.assertFalse(results)
+        assert 'Invalid column name d' in err
+        assert 'child process(es) died unexpectedly' not in err
+        assert not results
 
     def test_all_datatypes_write(self):
         """
@@ -1733,7 +1733,7 @@ class CqlshCopyTest(Tester):
 
         def _test(prepared_statements):
             tempfile = self.get_temp_file()
-            debug('Exporting to csv file: {name}'.format(name=tempfile.name))
+            logger.debug('Exporting to csv file: {name}'.format(name=tempfile.name))
             self.run_cqlsh(cmds="COPY ks.testdatatype TO '{}' WITH PREPAREDSTATEMENTS = {}"
                            .format(tempfile.name, prepared_statements))
 
@@ -1767,7 +1767,7 @@ class CqlshCopyTest(Tester):
             writer.writerow(data_set)
 
         def _test(prepared_statements):
-            debug('Importing from csv file: {name}'.format(name=tempfile.name))
+            logger.debug('Importing from csv file: {name}'.format(name=tempfile.name))
             self.run_cqlsh(cmds="COPY ks.testdatatype FROM '{}' WITH PREPAREDSTATEMENTS = {}"
                            .format(tempfile.name, prepared_statements))
 
@@ -1801,7 +1801,7 @@ class CqlshCopyTest(Tester):
         self.session.execute(insert_statement, self.data)
 
         tempfile = self.get_temp_file()
-        debug('Exporting to csv file: {name}'.format(name=tempfile.name))
+        logger.debug('Exporting to csv file: {name}'.format(name=tempfile.name))
         self.run_cqlsh(cmds="COPY ks.testdatatype TO '{}'".format(tempfile.name))
 
         exported_results = list(self.session.execute("SELECT * FROM testdatatype"))
@@ -1814,7 +1814,7 @@ class CqlshCopyTest(Tester):
 
             imported_results = list(self.session.execute("SELECT * FROM testdatatype"))
 
-            self.assertEqual(exported_results, imported_results)
+            assert exported_results == imported_results
 
         _test(True)
         _test(False)
@@ -1835,26 +1835,26 @@ class CqlshCopyTest(Tester):
         @jira_ticket CASSANDRA-9303
         """
         def do_round_trip(trueval, falseval, invalid=False):
-            debug('Exporting to csv file: {} with bool style {},{}'.format(tempfile.name, trueval, falseval))
+            logger.debug('Exporting to csv file: {} with bool style {},{}'.format(tempfile.name, trueval, falseval))
             _, err, _ = self.run_cqlsh(cmds="COPY ks.testbooleans TO '{}' WITH BOOLSTYLE='{}, {}'"
                                        .format(tempfile.name, trueval, falseval))
             if invalid:
                 expected_err = "Invalid boolean styles [{}, {}]".format(
                     ', '.join(["'{}'".format(s.strip()) for s in trueval.split(',')]),
                     ', '.join(["'{}'".format(s.strip()) for s in falseval.split(',')]))
-                self.assertIn(expected_err, err)
+                assert expected_err in err
                 return
 
-            self.assertItemsEqual([['0', falseval], ['1', trueval]], list(csv_rows(tempfile.name)))
+            assert [['0', falseval], ['1', trueval]] == list(csv_rows(tempfile.name))
             exported_results = list(self.session.execute("SELECT * FROM testbooleans"))
 
-            debug('Importing from csv file: {}'.format(tempfile.name))
+            logger.debug('Importing from csv file: {}'.format(tempfile.name))
             self.session.execute('TRUNCATE ks.testbooleans')
             self.run_cqlsh(cmds="COPY ks.testbooleans FROM '{}' WITH BOOLSTYLE='{}, {}'"
                            .format(tempfile.name, trueval, falseval))
 
             imported_results = list(self.session.execute("SELECT * FROM testbooleans"))
-            self.assertEqual(exported_results, imported_results)
+            assert exported_results == imported_results
 
         self.prepare()
         self.session.execute("""
@@ -1999,23 +1999,23 @@ class CqlshCopyTest(Tester):
         tempfile = self.get_temp_file()
 
         def do_test(expected_vals, thousands_sep, decimal_sep):
-            debug('Exporting to csv file: {} with thousands_sep {} and decimal_sep {}'
+            logger.debug('Exporting to csv file: {} with thousands_sep {} and decimal_sep {}'
                   .format(tempfile.name, thousands_sep, decimal_sep))
             self.run_cqlsh(cmds="COPY ks.testnumberseps TO '{}' WITH THOUSANDSSEP='{}' AND DECIMALSEP='{}'"
                            .format(tempfile.name, thousands_sep, decimal_sep))
 
             exported_results = list(self.session.execute("SELECT * FROM testnumberseps"))
             self.maxDiff = None
-            self.assertItemsEqual(expected_vals, list(csv_rows(tempfile.name)))
+            assert expected_vals == list(csv_rows(tempfile.name))
 
-            debug('Importing from csv file: {} with thousands_sep {} and decimal_sep {}'
+            logger.debug('Importing from csv file: {} with thousands_sep {} and decimal_sep {}'
                   .format(tempfile.name, thousands_sep, decimal_sep))
             self.session.execute('TRUNCATE ks.testnumberseps')
             self.run_cqlsh(cmds="COPY ks.testnumberseps FROM '{}' WITH THOUSANDSSEP='{}' AND DECIMALSEP='{}'"
                            .format(tempfile.name, thousands_sep, decimal_sep))
 
             imported_results = list(self.session.execute("SELECT * FROM testnumberseps"))
-            self.assertEqual(len(expected_vals), len(imported_results))
+            assert len(expected_vals) == len(imported_results)
 
             table_meta = UpdatingTableMetadataWrapper(self.session.cluster,
                                                       ks_name=self.ks,
@@ -2023,7 +2023,7 @@ class CqlshCopyTest(Tester):
             cql_type_names = [table_meta.columns[c].cql_type for c in table_meta.columns]
 
             # we format as if we were comparing to csv to overcome loss of precision in the import
-            self.assertEqual(self.result_to_csv_rows(exported_results, cql_type_names),
-                             self.result_to_csv_rows(imported_results, cql_type_names))
+            assert self.result_to_csv_rows(exported_results, cql_type_names) \
+                   == self.result_to_csv_rows(imported_results, cql_type_names)
 
         do_test(expected_vals_usual, ',', '.')
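
The comment ahead of the final comparison is the crux of this test: an
imported value can lose precision relative to what was exported, so both
result sets are rendered to csv strings before being compared. A
hypothetical formatter illustrates the separator handling involved:

    def fmt(value, decimal_sep='.', thousands_sep=','):
        # Format with python's default grouping, then swap in the requested
        # separators (the NUL placeholder avoids clobbering one with the other).
        text = '{:,.3f}'.format(value)
        return (text.replace(',', '\0')
                    .replace('.', decimal_sep)
                    .replace('\0', thousands_sep))

    assert fmt(1234.5678) == '1,234.568'
    assert fmt(1234.5678, decimal_sep=',', thousands_sep='.') == '1.234,568'
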
@@ -2050,17 +2050,16 @@ class CqlshCopyTest(Tester):
             writer.writerow([2, '1943-06-19 11:21:01.123+0000'])
             writer.writerow([3, '1943-06-19 11:21:01.123456+0000'])
 
-        debug('Importing from csv file: {}'.format(tempfile1.name))
+        logger.debug('Importing from csv file: {}'.format(tempfile1.name))
         self.run_cqlsh(cmds="COPY ks.testsubsecond FROM '{}'".format(tempfile1.name))
 
-        debug('Exporting to csv file: {}'.format(tempfile2.name))
+        logger.debug('Exporting to csv file: {}'.format(tempfile2.name))
         self.run_cqlsh(cmds="COPY ks.testsubsecond TO '{}'".format(tempfile2.name))
 
         csv_results = sorted(list(csv_rows(tempfile2.name)))
-        self.assertItemsEqual([['1', '1943-06-19 11:21:01.000+0000'],
+        assert [['1', '1943-06-19 11:21:01.000+0000'],
                                ['2', '1943-06-19 11:21:01.123+0000'],
-                               ['3', '1943-06-19 11:21:01.124+0000']],
-                              csv_results)
+                               ['3', '1943-06-19 11:21:01.124+0000']] == csv_results
 
     @since('3.6')
     def test_round_trip_with_different_number_precision(self):
@@ -2088,7 +2087,7 @@ class CqlshCopyTest(Tester):
                 writer = csv.writer(csvfile)
                 writer.writerow([1, float_format_str.format(1.12345), double_format_str.format(1.123456789123)])
 
-            debug('Importing from {}'.format(tempfile1.name))
+            logger.debug('Importing from {}'.format(tempfile1.name))
             self.run_cqlsh(cmds="COPY ks.testfloatprecision FROM '{}'".format(tempfile1.name))
 
             cmd = "COPY ks.testfloatprecision TO '{}'".format(tempfile2.name)
@@ -2101,10 +2100,10 @@ class CqlshCopyTest(Tester):
             if float_precision is not None:
                 cmd += " FLOATPRECISION={}".format(float_precision)
 
-            debug('Exporting to {} with {}'.format(tempfile2.name, cmd))
+            logger.debug('Exporting to {} with {}'.format(tempfile2.name, cmd))
             self.run_cqlsh(cmds=cmd)
 
-            self.assertItemsEqual(sorted(list(csv_rows(tempfile1.name))), sorted(list(csv_rows(tempfile2.name))))
+            assert sorted(list(csv_rows(tempfile1.name))) == sorted(list(csv_rows(tempfile2.name)))
 
         do_test(None, None)
         do_test(None, 10)
@@ -2128,25 +2127,25 @@ class CqlshCopyTest(Tester):
         num_processes = 4
         stress_table = 'keyspace1.standard1'
 
-        debug('Running stress without any user profile')
+        logger.debug('Running stress without any user profile')
         self.node1.stress(['write', 'n={}'.format(num_records), 'no-warmup', '-rate', 'threads=50'])
 
         tempfile = self.get_temp_file()
-        debug('Exporting to csv file: {}'.format(tempfile.name))
+        logger.debug('Exporting to csv file: {}'.format(tempfile.name))
         out, _, _ = self.run_cqlsh(cmds="COPY {} TO '{}' WITH NUMPROCESSES='{}'"
                                    .format(stress_table, tempfile.name, num_processes))
-        debug(out)
-        self.assertIn('Using {} child processes'.format(num_processes), out)
-        self.assertEqual(num_records, len(open(tempfile.name).readlines()))
+        logger.debug(out)
+        assert 'Using {} child processes'.format(num_processes) in out
+        assert num_records == len(open(tempfile.name).readlines())
 
         self.session.execute("TRUNCATE {}".format(stress_table))
-        debug('Importing from csv file: {}'.format(tempfile.name))
+        logger.debug('Importing from csv file: {}'.format(tempfile.name))
         out, _, _ = self.run_cqlsh(cmds="COPY {} FROM '{}' WITH NUMPROCESSES='{}'"
                                    .format(stress_table, tempfile.name, num_processes))
-        debug(out)
-        self.assertIn('Using {} child processes'.format(num_processes), out)
-        self.assertEqual([[num_records]], rows_to_list(self.session.execute("SELECT COUNT(*) FROM {}"
-                                                                            .format(stress_table))))
+        logger.debug(out)
+        assert 'Using {} child processes'.format(num_processes) in out
+        assert [[num_records]] == rows_to_list(self.session.execute("SELECT COUNT(*) FROM {}"
+                                                                    .format(stress_table)))
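
One caveat with the record-count checks used here and elsewhere in this
file: len(open(name).readlines()) reads the whole file into memory and
leaves the handle for the garbage collector to close. A tidier sketch:

    import tempfile

    def count_lines(path):
        # Count lines without loading the whole file or leaking the handle.
        with open(path) as f:
            return sum(1 for _ in f)

    with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as f:
        f.write('a\nb\nc\n')
    assert count_lines(f.name) == 3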
 
     def test_round_trip_with_rate_file(self):
         """
@@ -2166,21 +2165,21 @@ class CqlshCopyTest(Tester):
             # frequency is every 100 milliseconds this should be the number of lines written in 1 second)
             # and that the last line indicates all rows were processed
             lines = [line.rstrip('\n') for line in open(ratefile.name)]
-            debug(lines)
-            self.assertLessEqual(10, len(lines), "Expected at least 10 lines but got {} lines".format(len(lines)))
-            self.assertTrue(lines[-1].startswith('Processed: {} rows;'.format(num_rows)))
+            logger.debug(lines)
+            assert 10 <= len(lines), "Expected at least 10 lines but got {} lines".format(len(lines))
+            assert lines[-1].startswith('Processed: {} rows;'.format(num_rows))
 
         self.prepare()
 
-        debug('Running stress')
+        logger.debug('Running stress')
         self.node1.stress(['write', 'n={}'.format(num_rows), 'no-warmup', '-rate', 'threads=50'])
 
-        debug('Exporting to csv file: {}'.format(tempfile.name))
+        logger.debug('Exporting to csv file: {}'.format(tempfile.name))
         self.run_cqlsh(cmds="COPY {} TO '{}' WITH RATEFILE='{}' AND REPORTFREQUENCY='{}'"
                        .format(stress_table, tempfile.name, ratefile.name, report_frequency))
 
         # check all records were exported
-        self.assertEqual(num_rows, len(open(tempfile.name).readlines()))
+        assert num_rows == len(open(tempfile.name).readlines())
 
         check_rate_file()
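
check_rate_file above encodes two invariants: with REPORTFREQUENCY at 100ms,
an export that runs for at least a second writes at least ten progress
lines, and the last line must account for every row. Reduced to a sketch:

    def check_rate_lines(lines, num_rows):
        # At ten reports per second, >= 1s of work means >= 10 lines ...
        assert len(lines) >= 10, "expected at least 10 lines, got {}".format(len(lines))
        # ... and the final report must cover the full row count.
        assert lines[-1].startswith('Processed: {} rows;'.format(num_rows))

    check_rate_lines(['Processed: {} rows; ...'.format(n)
                      for n in range(10, 110, 10)], 100)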
 
@@ -2188,13 +2187,13 @@ class CqlshCopyTest(Tester):
         os.unlink(ratefile.name)
         self.session.execute("TRUNCATE {}".format(stress_table))
 
-        debug('Importing from csv file: {}'.format(tempfile.name))
+        logger.debug('Importing from csv file: {}'.format(tempfile.name))
         self.run_cqlsh(cmds="COPY {} FROM '{}' WITH RATEFILE='{}' AND REPORTFREQUENCY='{}'"
                        .format(stress_table, tempfile.name, ratefile.name, report_frequency))
 
         # check all records were imported
-        self.assertEqual([[num_rows]], rows_to_list(self.session.execute("SELECT COUNT(*) FROM {}"
-                                                                         .format(stress_table))))
+        assert [[num_rows]] == rows_to_list(self.session.execute("SELECT COUNT(*) FROM {}"
+                                                                 .format(stress_table)))
 
         check_rate_file()
 
@@ -2210,13 +2209,13 @@ class CqlshCopyTest(Tester):
         tempfile = self.get_temp_file()
         self.prepare(nodes=1)
 
-        debug('Running stress')
+        logger.debug('Running stress')
         stress_table = 'keyspace1.standard1'
         self.node1.stress(['write', 'n=1K', 'no-warmup', '-rate', 'threads=50'])
 
         def create_config_file(config_lines):
             config_file = self.get_temp_file()
-            debug('Creating config file {}'.format(config_file.name))
+            logger.debug('Creating config file {}'.format(config_file.name))
 
             with open(config_file.name, 'wb') as config:
                 for line in config_lines:
@@ -2233,11 +2232,11 @@ class CqlshCopyTest(Tester):
             return ''
 
         def check_options(out, expected_options):
-            opts = extract_options(out)
-            debug('Options: {}'.format(opts))
+            opts = extract_options(out.decode("utf-8"))
+            logger.debug('Options: {}'.format(opts))
             d = json.loads(opts)
             for k, v in expected_options:
-                self.assertEqual(v, d[k])
+                assert v == d[k]
 
         def do_test(config_lines, expected_options):
             config_file = create_config_file(config_lines)
@@ -2250,9 +2249,9 @@ class CqlshCopyTest(Tester):
             if use_default:
                 cqlsh_options.append('--cqlshrc={}'.format(config_file))
 
-            debug('{} with options {}'.format(cmd, cqlsh_options))
+            logger.debug('{} with options {}'.format(cmd, cqlsh_options))
             out, _, _ = self.run_cqlsh(cmds=cmd, cqlsh_options=cqlsh_options, skip_cqlshrc=True)
-            debug(out)
+            logger.debug(out)
             check_options(out, expected_options)
 
         for use_default in [True, False]:
@@ -2306,11 +2305,11 @@ class CqlshCopyTest(Tester):
         tempfile = self.get_temp_file()
         write_rows_to_csv(tempfile.name, data)
 
-        debug('Importing from csv file: {name}'.format(name=tempfile.name))
+        logger.debug('Importing from csv file: {name}'.format(name=tempfile.name))
         out, err, _ = self.run_cqlsh("COPY ks.testcolumns FROM '{name}'".format(name=tempfile.name))
 
-        self.assertFalse(self.session.execute("SELECT * FROM testcolumns"))
-        self.assertIn('Failed to import', err)
+        assert not self.session.execute("SELECT * FROM testcolumns")
+        assert 'Failed to import' in err
 
     def _test_round_trip(self, nodes, partitioner, num_records=10000):
         """
@@ -2341,21 +2340,21 @@ class CqlshCopyTest(Tester):
         results = list(self.session.execute("SELECT * FROM testcopyto"))
 
         tempfile = self.get_temp_file()
-        debug('Exporting to csv file: {}'.format(tempfile.name))
+        logger.debug('Exporting to csv file: {}'.format(tempfile.name))
         out, err, _ = self.run_cqlsh(cmds="COPY ks.testcopyto TO '{}'".format(tempfile.name))
-        debug(out)
+        logger.debug(out)
 
         # check all records were exported
-        self.assertEqual(num_records, sum(1 for line in open(tempfile.name)))
+        assert num_records == sum(1 for line in open(tempfile.name))
 
         # import the CSV file with COPY FROM
         self.session.execute("TRUNCATE ks.testcopyto")
-        debug('Importing from csv file: {}'.format(tempfile.name))
+        logger.debug('Importing from csv file: {}'.format(tempfile.name))
         out, err, _ = self.run_cqlsh(cmds="COPY ks.testcopyto FROM '{}'".format(tempfile.name))
-        debug(out)
+        logger.debug(out)
 
         new_results = list(self.session.execute("SELECT * FROM testcopyto"))
-        self.assertEqual(results, new_results)
+        assert results == new_results
 
     def test_round_trip_murmur3(self):
         self._test_round_trip(nodes=3, partitioner="murmur3")
@@ -2392,7 +2391,7 @@ class CqlshCopyTest(Tester):
         results = list(self.session.execute("SELECT * FROM testcopyto"))
 
         tempfile = self.get_temp_file()
-        debug('Exporting to csv file: {name}'.format(name=tempfile.name))
+        logger.debug('Exporting to csv file: {name}'.format(name=tempfile.name))
 
         commandfile = self.get_temp_file()
         with open(commandfile.name, 'w') as f:
@@ -2403,7 +2402,7 @@ class CqlshCopyTest(Tester):
 
         # import the CSV file with COPY FROM
         self.session.execute("TRUNCATE ks.testcopyto")
-        debug('Importing from csv file: {name}'.format(name=tempfile.name))
+        logger.debug('Importing from csv file: {name}'.format(name=tempfile.name))
 
         commandfile = self.get_temp_file()
         with open(commandfile.name, 'w') as f:
@@ -2412,7 +2411,7 @@ class CqlshCopyTest(Tester):
 
         self.run_cqlsh(cmds="SOURCE '{name}'".format(name=commandfile.name))
         new_results = list(self.session.execute("SELECT * FROM testcopyto"))
-        self.assertEqual(results, new_results)
+        assert results == new_results
 
     def _test_bulk_round_trip(self, nodes, partitioner,
                               num_operations, profile=None,
@@ -2454,10 +2453,10 @@ class CqlshCopyTest(Tester):
 
         def create_records():
             if not profile:
-                debug('Running stress without any user profile')
+                logger.debug('Running stress without any user profile')
                 self.node1.stress(['write', 'n={} cl=ALL'.format(num_operations), 'no-warmup', '-rate', 'threads=50'])
             else:
-                debug('Running stress with user profile {}'.format(profile))
+                logger.debug('Running stress with user profile {}'.format(profile))
                 self.node1.stress(['user', 'profile={}'.format(profile), 'ops(insert=1)',
                                    'n={} cl=ALL'.format(num_operations), 'no-warmup', '-rate', 'threads=50'])
 
@@ -2466,31 +2465,31 @@ class CqlshCopyTest(Tester):
             else:
                 count_statement = SimpleStatement("SELECT COUNT(*) FROM {}".format(stress_table), consistency_level=ConsistencyLevel.ALL)
                 ret = rows_to_list(self.session.execute(count_statement))[0][0]
-                debug('Generated {} records'.format(ret))
-                self.assertTrue(ret >= num_operations, 'cassandra-stress did not import enough records')
+                logger.debug('Generated {} records'.format(ret))
+                assert ret >= num_operations, 'cassandra-stress did not import enough records'
                 return ret
 
         def run_copy_to(filename):
-            debug('Exporting to csv file: {}'.format(filename.name))
+            logger.debug('Exporting to csv file: {}'.format(filename.name))
             start = datetime.datetime.now()
             copy_to_cmd = "CONSISTENCY ALL; COPY {} TO '{}'".format(stress_table, filename.name)
             if copy_to_options:
-                copy_to_cmd += ' WITH ' + ' AND '.join('{} = {}'.format(k, v) for k, v in copy_to_options.iteritems())
-            debug('Running {}'.format(copy_to_cmd))
+                copy_to_cmd += ' WITH ' + ' AND '.join('{} = {}'.format(k, v) for k, v in copy_to_options.items())
+            logger.debug('Running {}'.format(copy_to_cmd))
             result = self.run_cqlsh(cmds=copy_to_cmd)
             ret.append(result)
-            debug("COPY TO took {} to export {} records".format(datetime.datetime.now() - start, num_records))
+            logger.debug("COPY TO took {} to export {} records".format(datetime.datetime.now() - start, num_records))
 
         def run_copy_from(filename):
-            debug('Importing from csv file: {}'.format(filename.name))
+            logger.debug('Importing from csv file: {}'.format(filename.name))
             start = datetime.datetime.now()
             copy_from_cmd = "COPY {} FROM '{}'".format(stress_table, filename.name)
             if copy_from_options:
-                copy_from_cmd += ' WITH ' + ' AND '.join('{} = {}'.format(k, v) for k, v in copy_from_options.iteritems())
-            debug('Running {}'.format(copy_from_cmd))
+                copy_from_cmd += ' WITH ' + ' AND '.join('{} = {}'.format(k, v) for k, v in copy_from_options.items())
+            logger.debug('Running {}'.format(copy_from_cmd))
             result = self.run_cqlsh(cmds=copy_from_cmd)
             ret.append(result)
-            debug("COPY FROM took {} to import {} records".format(datetime.datetime.now() - start, num_records))
+            logger.debug("COPY FROM took {} to import {} records".format(datetime.datetime.now() - start, num_records))
 
         num_records = create_records()
 
@@ -2499,10 +2498,10 @@ class CqlshCopyTest(Tester):
         run_copy_to(tempfile1)
 
         # check all records generated were exported
-        self.assertEqual(num_records, sum(1 for _ in open(tempfile1.name)))
+        assert num_records == sum(1 for _ in open(tempfile1.name))
 
         # import records from the first csv file
-        debug('Truncating {}...'.format(stress_table))
+        logger.debug('Truncating {}...'.format(stress_table))
         self.session.execute("TRUNCATE {}".format(stress_table))
         run_copy_from(tempfile1)
 
@@ -2597,9 +2596,9 @@ class CqlshCopyTest(Tester):
         os.environ['CQLSH_COPY_TEST_NUM_CORES'] = '1'
         ret = self._test_bulk_round_trip(nodes=3, partitioner="murmur3", num_operations=100000)
         if self.cluster.version() >= LooseVersion('3.6'):
-            debug('Checking that number of cores detected is correct')
+            logger.debug('Checking that number of cores detected is correct')
             for out in ret:
-                self.assertIn("Detected 1 core", out[0])
+                assert "Detected 1 core" in out[0]
 
     @since('3.0.5')
     def test_bulk_round_trip_with_backoff(self):
@@ -2621,9 +2620,9 @@ class CqlshCopyTest(Tester):
 
         @jira_ticket CASSANDRA-10858
         """
-        if DISABLE_VNODES:
+        if not self.dtest_config.use_vnodes:
             tokens = sorted(self.cluster.balanced_tokens(3))
-            debug('Using tokens {}'.format(tokens))
+            logger.debug('Using tokens {}'.format(tokens))
             self.prepare(nodes=3, tokens=tokens)
             start = tokens[1]
             end = tokens[2]
@@ -2631,14 +2630,14 @@ class CqlshCopyTest(Tester):
             self.prepare(nodes=1)
             metadata = self.session.cluster.metadata
             metadata.token_map.rebuild_keyspace(self.ks, build_if_absent=True)
-            ring = [t.value for t in metadata.token_map.tokens_to_hosts_by_ks[self.ks].keys()]
-            self.assertGreaterEqual(len(ring), 3, 'Not enough ranges in the ring for this test')
+            ring = [t.value for t in list(metadata.token_map.tokens_to_hosts_by_ks[self.ks].keys())]
+            assert len(ring) >= 3, 'Not enough ranges in the ring for this test'
             ring.sort()
-            idx = len(ring) / 2
+            idx = len(ring) // 2
             start = ring[idx]
             end = ring[idx + 1]
 
-        debug("Using failure range: {}, {}".format(start, end))
+        logger.debug("Using failure range: {}, {}".format(start, end))
         return start, end
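
With vnodes enabled, the helper above fails a range taken from the middle of
the sorted ring; the floor division is what keeps the index an integer on
python 3. A reduced sketch of the selection:

    ring = sorted([-9000, -100, 0, 100, 9000])
    idx = len(ring) // 2          # plain / would produce a float on python 3
    start, end = ring[idx], ring[idx + 1]
    assert (start, end) == (0, 100)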
 
     def test_copy_to_with_more_failures_than_max_attempts(self):
@@ -2653,7 +2652,7 @@ class CqlshCopyTest(Tester):
         num_records = 100000
         start, end = self.prepare_copy_to_with_failures()
 
-        debug('Running stress')
+        logger.debug('Running stress')
         stress_table = 'keyspace1.standard1'
         self.node1.stress(['write', 'n={}'.format(num_records), 'no-warmup', '-rate', 'threads=50'])
 
@@ -2661,15 +2660,15 @@ class CqlshCopyTest(Tester):
         failures = {'failing_range': {'start': start, 'end': end, 'num_failures': 5}}
         os.environ['CQLSH_COPY_TEST_FAILURES'] = json.dumps(failures)
 
-        debug('Exporting to csv file: {} with {} and 3 max attempts'
+        logger.debug('Exporting to csv file: {} with {} and 3 max attempts'
               .format(tempfile.name, os.environ['CQLSH_COPY_TEST_FAILURES']))
         out, err, _ = self.run_cqlsh(cmds="COPY {} TO '{}' WITH MAXATTEMPTS='3'"
                                      .format(stress_table, tempfile.name))
-        debug(out)
-        debug(err)
+        logger.debug(out)
+        logger.debug(err)
 
-        self.assertIn('some records might be missing', err)
-        self.assertTrue(len(open(tempfile.name).readlines()) < num_records)
+        assert 'some records might be missing' in err
+        assert len(open(tempfile.name).readlines()) < num_records
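
All the failure-injection tests in this file share one mechanism: a failure
spec is serialised into the CQLSH_COPY_TEST_FAILURES environment variable,
where the instrumented copy code reads it back in the worker processes. The
handshake itself, as a sketch:

    import json
    import os

    failures = {'failing_range': {'start': 100, 'end': 200, 'num_failures': 5}}
    os.environ['CQLSH_COPY_TEST_FAILURES'] = json.dumps(failures)
    # A child process re-reads the variable and applies the injected failures.
    assert json.loads(os.environ['CQLSH_COPY_TEST_FAILURES']) == failures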
 
     def test_copy_to_with_fewer_failures_than_max_attempts(self):
         """
@@ -2683,22 +2682,22 @@ class CqlshCopyTest(Tester):
         num_records = 100000
         start, end = self.prepare_copy_to_with_failures()
 
-        debug('Running stress')
+        logger.debug('Running stress')
         stress_table = 'keyspace1.standard1'
         self.node1.stress(['write', 'n={}'.format(num_records), 'no-warmup', '-rate', 'threads=50'])
 
         tempfile = self.get_temp_file()
         failures = {'failing_range': {'start': start, 'end': end, 'num_failures': 3}}
         os.environ['CQLSH_COPY_TEST_FAILURES'] = json.dumps(failures)
-        debug('Exporting to csv file: {} with {} and 5 max attemps'
+        logger.debug('Exporting to csv file: {} with {} and 5 max attempts'
               .format(tempfile.name, os.environ['CQLSH_COPY_TEST_FAILURES']))
         out, err, _ = self.run_cqlsh(cmds="COPY {} TO '{}' WITH MAXATTEMPTS='5'"
                                      .format(stress_table, tempfile.name))
-        debug(out)
-        debug(err)
+        logger.debug(out)
+        logger.debug(err)
 
-        self.assertNotIn('some records might be missing', err)
-        self.assertEqual(num_records, len(open(tempfile.name).readlines()))
+        assert 'some records might be missing' not in err
+        assert num_records == len(open(tempfile.name).readlines())
 
     def test_copy_to_with_child_process_crashing(self):
         """
@@ -2712,7 +2711,7 @@ class CqlshCopyTest(Tester):
         num_records = 100000
         start, end = self.prepare_copy_to_with_failures()
 
-        debug('Running stress')
+        logger.debug('Running stress')
         stress_table = 'keyspace1.standard1'
         self.node1.stress(['write', 'n={}'.format(num_records), 'no-warmup', '-rate', 'threads=50'])
 
@@ -2720,14 +2719,14 @@ class CqlshCopyTest(Tester):
         failures = {'exit_range': {'start': start, 'end': end}}
         os.environ['CQLSH_COPY_TEST_FAILURES'] = json.dumps(failures)
 
-        debug('Exporting to csv file: {} with {}'
+        logger.debug('Exporting to csv file: {} with {}'
               .format(tempfile.name, os.environ['CQLSH_COPY_TEST_FAILURES']))
         out, err, _ = self.run_cqlsh(cmds="COPY {} TO '{}'".format(stress_table, tempfile.name))
-        debug(out)
-        debug(err)
+        logger.debug(out)
+        logger.debug(err)
 
-        self.assertIn('some records might be missing', err)
-        self.assertTrue(len(open(tempfile.name).readlines()) < num_records)
+        assert 'some records might be missing' in err
+        assert len(open(tempfile.name).readlines()) < num_records
 
     def test_copy_from_with_more_failures_than_max_attempts(self):
         """
@@ -2743,27 +2742,27 @@ class CqlshCopyTest(Tester):
         num_records = 1000
         self.prepare(nodes=1)
 
-        debug('Running stress')
+        logger.debug('Running stress')
         stress_table = 'keyspace1.standard1'
         self.node1.stress(['write', 'n={}'.format(num_records), 'no-warmup', '-rate', 'threads=50'])
 
         tempfile = self.get_temp_file()
-        debug('Exporting to csv file {} to generate a file'.format(tempfile.name))
+        logger.debug('Exporting to csv file {} to generate a file'.format(tempfile.name))
         self.run_cqlsh(cmds="COPY {} TO '{}'".format(stress_table, tempfile.name))
 
         self.session.execute("TRUNCATE {}".format(stress_table))
 
         failures = {'failing_batch': {'id': 30, 'failures': 5}}
         os.environ['CQLSH_COPY_TEST_FAILURES'] = json.dumps(failures)
-        debug('Importing from csv file {} with {}'.format(tempfile.name, os.environ['CQLSH_COPY_TEST_FAILURES']))
+        logger.debug('Importing from csv file {} with {}'.format(tempfile.name, os.environ['CQLSH_COPY_TEST_FAILURES']))
         out, err, _ = self.run_cqlsh(cmds="COPY {} FROM '{}' WITH CHUNKSIZE='1' AND MAXATTEMPTS='3'"
                                      .format(stress_table, tempfile.name))
-        debug(out)
-        debug(err)
+        logger.debug(out)
+        logger.debug(err)
 
-        self.assertIn('Failed to process', err)
+        assert 'Failed to process' in err
         num_records_imported = rows_to_list(self.session.execute("SELECT COUNT(*) FROM {}".format(stress_table)))[0][0]
-        self.assertTrue(num_records_imported < num_records)
+        assert num_records_imported < num_records
 
     def test_copy_from_with_fewer_failures_than_max_attempts(self):
         """
@@ -2782,27 +2781,27 @@ class CqlshCopyTest(Tester):
         num_records = 1000
         self.prepare(nodes=1)
 
-        debug('Running stress')
+        logger.debug('Running stress')
         stress_table = 'keyspace1.standard1'
         self.node1.stress(['write', 'n={}'.format(num_records), 'no-warmup', '-rate', 'threads=50'])
 
         tempfile = self.get_temp_file()
-        debug('Exporting to csv file {} to generate a file'.format(tempfile.name))
+        logger.debug('Exporting to csv file {} to generate a file'.format(tempfile.name))
         self.run_cqlsh(cmds="COPY {} TO '{}'".format(stress_table, tempfile.name))
 
         self.session.execute("TRUNCATE {}".format(stress_table))
 
         failures = {'failing_batch': {'id': 3, 'failures': 3}}
         os.environ['CQLSH_COPY_TEST_FAILURES'] = json.dumps(failures)
-        debug('Importing from csv file {} with {}'.format(tempfile.name, os.environ['CQLSH_COPY_TEST_FAILURES']))
+        logger.debug('Importing from csv file {} with {}'.format(tempfile.name, os.environ['CQLSH_COPY_TEST_FAILURES']))
         out, err, _ = self.run_cqlsh(cmds="COPY {} FROM '{}' WITH CHUNKSIZE=100 AND MAXATTEMPTS=5 AND INGESTRATE=101"
                                      .format(stress_table, tempfile.name))
-        debug(out)
-        debug(err)
+        logger.debug(out)
+        logger.debug(err)
 
-        self.assertNotIn('Failed to process', err)
+        assert 'Failed to process' not in err
         num_records_imported = rows_to_list(self.session.execute("SELECT COUNT(*) FROM {}".format(stress_table)))[0][0]
-        self.assertEquals(num_records, num_records_imported)
+        assert num_records == num_records_imported
 
     def test_copy_from_with_child_process_crashing(self):
         """
@@ -2817,27 +2816,27 @@ class CqlshCopyTest(Tester):
         num_records = 1000
         self.prepare(nodes=1)
 
-        debug('Running stress')
+        logger.debug('Running stress')
         stress_table = 'keyspace1.standard1'
         self.node1.stress(['write', 'n={}'.format(num_records), 'no-warmup', '-rate', 'threads=50'])
 
         tempfile = self.get_temp_file()
-        debug('Exporting to csv file {} to generate a file'.format(tempfile.name))
+        logger.debug('Exporting to csv file {} to generate a file'.format(tempfile.name))
         self.run_cqlsh(cmds="COPY {} TO '{}'".format(stress_table, tempfile.name))
 
         self.session.execute("TRUNCATE {}".format(stress_table))
 
         failures = {'exit_batch': {'id': 30}}
         os.environ['CQLSH_COPY_TEST_FAILURES'] = json.dumps(failures)
-        debug('Importing from csv file {} with {}'.format(tempfile.name, os.environ['CQLSH_COPY_TEST_FAILURES']))
+        logger.debug('Importing from csv file {} with {}'.format(tempfile.name, os.environ['CQLSH_COPY_TEST_FAILURES']))
         out, err, _ = self.run_cqlsh(cmds="COPY {} FROM '{}' WITH CHUNKSIZE='1'"
                                      .format(stress_table, tempfile.name))
-        debug(out)
-        debug(err)
+        logger.debug(out)
+        logger.debug(err)
 
-        self.assertIn('1 child process(es) died unexpectedly, aborting', err)
+        assert '1 child process(es) died unexpectedly, aborting' in err
         num_records_imported = rows_to_list(self.session.execute("SELECT COUNT(*) FROM {}".format(stress_table)))[0][0]
-        self.assertTrue(num_records_imported < num_records)
+        assert num_records_imported < num_records
 
     @since('3.0')
     def test_copy_from_with_unacked_batches(self):
@@ -2855,27 +2854,27 @@ class CqlshCopyTest(Tester):
         num_records = 1000
         self.prepare(nodes=1)
 
-        debug('Running stress')
+        logger.debug('Running stress')
         stress_table = 'keyspace1.standard1'
         self.node1.stress(['write', 'n={}'.format(num_records), 'no-warmup', '-rate', 'threads=50'])
 
         tempfile = self.get_temp_file()
-        debug('Exporting to csv file {} to generate a file'.format(tempfile.name))
+        logger.debug('Exporting to csv file {} to generate a file'.format(tempfile.name))
         self.run_cqlsh(cmds="COPY {} TO '{}'".format(stress_table, tempfile.name))
 
         self.session.execute("TRUNCATE {}".format(stress_table))
 
         failures = {'unsent_batch': {'id': 30}}
         os.environ['CQLSH_COPY_TEST_FAILURES'] = json.dumps(failures)
-        debug('Importing from csv file {} with {}'.format(tempfile.name, os.environ['CQLSH_COPY_TEST_FAILURES']))
+        logger.debug('Importing from csv file {} with {}'.format(tempfile.name, os.environ['CQLSH_COPY_TEST_FAILURES']))
         out, err, _ = self.run_cqlsh(cmds="COPY {} FROM '{}' WITH CHUNKSIZE=1 AND CHILDTIMEOUT=30 AND REQUESTTIMEOUT=15"
                                      .format(stress_table, tempfile.name))
-        debug(out)
-        debug(err)
+        logger.debug(out)
+        logger.debug(err)
 
-        self.assertIn('No records inserted in 30 seconds, aborting', err)
+        assert 'No records inserted in 30 seconds, aborting' in err
         num_records_imported = rows_to_list(self.session.execute("SELECT COUNT(*) FROM {}".format(stress_table)))[0][0]
-        self.assertLess(num_records_imported, num_records)
+        assert num_records_imported < num_records
 
     @since('2.2.5')
     def test_copy_from_with_large_cql_rows(self):
@@ -2889,7 +2888,7 @@ class CqlshCopyTest(Tester):
         self.prepare(nodes=1, configuration_options={'batch_size_warn_threshold_in_kb': '1',   # warn with 1kb and fail
                                                      'batch_size_fail_threshold_in_kb': '5'})  # with 5kb size batches
 
-        debug('Running stress')
+        logger.debug('Running stress')
         stress_table_name = 'standard1'
         self.ks = 'keyspace1'
         stress_ks_table_name = self.ks + '.' + stress_table_name
@@ -2899,13 +2898,13 @@ class CqlshCopyTest(Tester):
                            '-col', 'n=FIXED(10)', 'SIZE=FIXED(1024)'])  # 10 columns of 1kb each
 
         tempfile = self.get_temp_file()
-        debug('Exporting to csv file {} to generate a file'.format(tempfile.name))
+        logger.debug('Exporting to csv file {} to generate a file'.format(tempfile.name))
         self.run_cqlsh(cmds="COPY {} TO '{}'".format(stress_ks_table_name, tempfile.name))
 
         # Import using prepared statements (the default) and verify
         self.session.execute("TRUNCATE {}".format(stress_ks_table_name))
 
-        debug('Importing from csv file {}'.format(tempfile.name))
+        logger.debug('Importing from csv file {}'.format(tempfile.name))
         self.run_cqlsh(cmds="COPY {} FROM '{}' WITH MAXBATCHSIZE=1".format(stress_ks_table_name, tempfile.name))
 
         results = list(self.session.execute("SELECT * FROM {}".format(stress_ks_table_name)))
@@ -2914,7 +2913,7 @@ class CqlshCopyTest(Tester):
         # Import without prepared statements and verify
         self.session.execute("TRUNCATE {}".format(stress_ks_table_name))
 
-        debug('Importing from csv file with MAXBATCHSIZE=1 {}'.format(tempfile.name))
+        logger.debug('Importing from csv file with MAXBATCHSIZE=1 {}'.format(tempfile.name))
         self.run_cqlsh(cmds="COPY {} FROM '{}' WITH MAXBATCHSIZE=1 AND PREPAREDSTATEMENTS=FALSE"
                        .format(stress_ks_table_name, tempfile.name))
 
@@ -2957,14 +2956,14 @@ class CqlshCopyTest(Tester):
                 writer.writerow(row)
 
         def _test(preparedStatements):
-            debug('Importing from csv file: {name}'.format(name=tempfile.name))
+            logger.debug('Importing from csv file: {name}'.format(name=tempfile.name))
             cmds = "COPY ks.testspecialcharsinudt FROM '{}' WITH PREPAREDSTATEMENTS = {}"\
                 .format(tempfile.name, preparedStatements)
-            debug(cmds)
+            logger.debug(cmds)
             self.run_cqlsh(cmds=cmds)
 
             results = list(self.session.execute("SELECT * FROM testspecialcharsinudt"))
-            debug(results)
+            logger.debug(results)
             # we set nullval to the literal string '' to ensure the csv formatting output on trunk
             # matches the __repr__ of MyType() and we need the '' around values to ensure we write
             # quoted values in the csv
@@ -2985,24 +2984,22 @@ class CqlshCopyTest(Tester):
         self.session.execute('CREATE TABLE ks.testauth (a int PRIMARY KEY, b text)')
 
         num_records = 10
-        for i in xrange(num_records):
+        for i in range(num_records):
             self.session.execute("INSERT INTO ks.testauth (a,b) VALUES ({}, 'abc')".format(i))
 
         tempfile = self.get_temp_file()
 
         # do an ordinary COPY TO AND FROM roundtrip
-        debug('Exporting to csv file: {}'.format(tempfile.name))
+        logger.debug('Exporting to csv file: {}'.format(tempfile.name))
         ret = self.run_cqlsh(cmds="COPY ks.testauth TO '{}'".format(tempfile.name), auth_enabled=True)
-        self.assertEqual(num_records, len(open(tempfile.name).readlines()),
-                         msg="Failed to export {} rows\nSTDOUT:\n{}\nSTDERR:\n{}\n"
-                         .format(num_records, ret.stderr, ret.stdout))
+        assert num_records == len(open(tempfile.name).readlines()), \
+            "Failed to export {} rows\nSTDOUT:\n{}\nSTDERR:\n{}\n".format(num_records, ret.stderr, ret.stdout)
 
         self.session.execute("TRUNCATE testauth")
-        debug('Importing from csv file: {}'.format(tempfile.name))
+        logger.debug('Importing from csv file: {}'.format(tempfile.name))
         ret = self.run_cqlsh(cmds="COPY ks.testauth FROM '{}'".format(tempfile.name), auth_enabled=True)
-        self.assertEqual([[num_records]], rows_to_list(self.session.execute("SELECT COUNT(*) FROM ks.testauth")),
-                         msg="Failed to import {} rows\nSTDOUT:\n{}\nSTDERR:\n{}\n"
-                         .format(num_records, ret.stderr, ret.stdout))
+        assert [[num_records]] == rows_to_list(self.session.execute("SELECT COUNT(*) FROM ks.testauth")), \
+            "Failed to import {} rows\nSTDOUT:\n{}\nSTDERR:\n{}\n".format(num_records, ret.stderr, ret.stdout)
 
         # do another COPY TO AND FROM roundtrip but invoke copy via the source command
         copy_to_cql = self.get_temp_file()
@@ -3013,32 +3010,28 @@ class CqlshCopyTest(Tester):
         with open(copy_from_cql.name, 'w') as f:
             f.write("COPY ks.testauth FROM '{}';".format(tempfile.name))
 
-        debug('Exporting to csv file {} via source of {}'.format(tempfile.name, copy_to_cql.name))
+        logger.debug('Exporting to csv file {} via source of {}'.format(tempfile.name, copy_to_cql.name))
         ret = self.run_cqlsh(cmds="SOURCE '{}'".format(copy_to_cql.name), auth_enabled=True)
-        self.assertEqual(num_records, len(open(tempfile.name).readlines()),
-                         msg="Failed to export {} rows\nSTDOUT:\n{}\nSTDERR:\n{}\n"
-                         .format(num_records, ret.stderr, ret.stdout))
+        assert num_records == len(open(tempfile.name).readlines()), \
+            "Failed to export {} rows\nSTDOUT:\n{}\nSTDERR:\n{}\n".format(num_records, ret.stderr, ret.stdout)
 
         self.session.execute("TRUNCATE testauth")
-        debug('Importing from csv file {} via source of {}'.format(tempfile.name, copy_from_cql.name))
+        logger.debug('Importing from csv file {} via source of {}'.format(tempfile.name, copy_from_cql.name))
         ret = self.run_cqlsh(cmds="SOURCE '{}'".format(copy_from_cql.name), auth_enabled=True)
-        self.assertEqual([[num_records]], rows_to_list(self.session.execute("SELECT COUNT(*) FROM ks.testauth")),
-                         msg="Failed to import {} rows\nSTDOUT:\n{}\nSTDERR:\n{}\n"
-                         .format(num_records, ret.stderr, ret.stdout))
+        assert [[num_records]] == rows_to_list(self.session.execute("SELECT COUNT(*) FROM ks.testauth")), \
+            "Failed to import {} rows\nSTDOUT:\n{}\nSTDERR:\n{}\n".format(num_records, ret.stderr, ret.stdout)
 
         # do another COPY TO AND FROM roundtrip but invoke copy via the -f cqlsh option
-        debug('Exporting to csv file {} via --file={}'.format(tempfile.name, copy_to_cql.name))
+        logger.debug('Exporting to csv file {} via --file={}'.format(tempfile.name, copy_to_cql.name))
         ret = self.run_cqlsh(cmds='', cqlsh_options=['--file={}'.format(copy_to_cql.name)], auth_enabled=True)
-        self.assertEqual(num_records, len(open(tempfile.name).readlines()),
-                         msg="Failed to export {} rows\nSTDOUT:\n{}\nSTDERR:\n{}\n"
-                         .format(num_records, ret.stderr, ret.stdout))
+        assert num_records == len(open(tempfile.name).readlines()), \
+            "Failed to export {} rows\nSTDOUT:\n{}\nSTDERR:\n{}\n".format(num_records, ret.stderr, ret.stdout)
 
         self.session.execute("TRUNCATE testauth")
-        debug('Importing from csv file {} via --file={}'.format(tempfile.name, copy_from_cql.name))
+        logger.debug('Importing from csv file {} via --file={}'.format(tempfile.name, copy_from_cql.name))
         ret = self.run_cqlsh(cmds='', cqlsh_options=['--file={}'.format(copy_from_cql.name)], auth_enabled=True)
-        self.assertEqual([[num_records]], rows_to_list(self.session.execute("SELECT COUNT(*) FROM ks.testauth")),
-                         msg="Failed to import {} rows\nSTDOUT:\n{}\nSTDERR:\n{}\n"
-                         .format(num_records, ret.stderr, ret.stdout))
+        assert [[num_records]] == rows_to_list(self.session.execute("SELECT COUNT(*) FROM ks.testauth")), \
+            "Failed to import {} rows\nSTDOUT:\n{}\nSTDERR:\n{}\n".format(num_records, ret.stderr, ret.stdout)
 
     @since('2.2')
     def test_reading_pk_timestamps_with_counters(self):
@@ -3069,7 +3062,7 @@ class CqlshCopyTest(Tester):
         with open(tempfile.name, 'w') as f:
             f.writelines(records)
 
-        debug('Importing from csv file: {name}'.format(name=tempfile.name))
+        logger.debug('Importing from csv file: {name}'.format(name=tempfile.name))
         cmds = "COPY ks.test_pk_timestamps_with_counters FROM '{name}' WITH delimiter = '|'".format(name=tempfile.name)
         self.run_cqlsh(cmds=cmds)
 
@@ -3108,20 +3101,20 @@ class CqlshCopyTest(Tester):
 
         def _test(prepared_statements):
             self.session.execute('TRUNCATE testwrongorderinudt')
-            debug('Importing from csv file: {name}'.format(name=tempfile.name))
+            logger.debug('Importing from csv file: {name}'.format(name=tempfile.name))
             cmds = "COPY ks.testwrongorderinudt FROM '{}' WITH PREPAREDSTATEMENTS = {}"\
                 .format(tempfile.name, prepared_statements)
-            debug(cmds)
+            logger.debug(cmds)
             self.run_cqlsh(cmds=cmds)
 
             results = rows_to_list(self.session.execute("SELECT * FROM testwrongorderinudt where a = 1"))
-            self.assertEquals(MyType('val1', SortedSet(['val2_1', 'val2_2'])), results[0][1])
+            assert MyType('val1', SortedSet(['val2_1', 'val2_2'])) == results[0][1]
 
             results = rows_to_list(self.session.execute("SELECT * FROM testwrongorderinudt where a = 2"))
-            self.assertEquals(MyType(None, SortedSet(['val2_1', 'val2_2'])), results[0][1])
+            assert MyType(None, SortedSet(['val2_1', 'val2_2'])) == results[0][1]
 
             results = rows_to_list(self.session.execute("SELECT * FROM testwrongorderinudt where a = 3"))
-            self.assertEquals(MyType('val1', None), results[0][1])
+            assert MyType('val1', None) == results[0][1]
 
         _test(True)
         _test(False)
@@ -3152,7 +3145,7 @@ class CqlshCopyTest(Tester):
             f.write('text1,text2,127.0.0.1,text3,f7ce3ac0-a66e-11e6-b58e-4e29450fd577,SA,2\n')
             f.write('text4,ヽ(´ー`)ノ,127.0.0.2,text6,f7ce3ac0-a66e-11e6-b58e-4e29450fd577,SA,2\n')
 
-        debug('Importing from csv file: {name}'.format(name=tempfile.name))
+        logger.debug('Importing from csv file: {name}'.format(name=tempfile.name))
         cmds = "COPY ks.test_reading_text_pk_counters FROM '{name}'".format(name=tempfile.name)
         self.run_cqlsh(cmds=cmds)
 
@@ -3184,7 +3177,7 @@ class CqlshCopyTest(Tester):
             f.write('text1,text2,127.0.0.1,text3,f7ce3ac0-a66e-11e6-b58e-4e29450fd577,SA\n')
             f.write('text4,ヽ(´ー`)ノ,127.0.0.2,text6,f7ce3ac0-a66e-11e6-b58e-4e29450fd577,SA\n')
 
-        debug('Importing from csv file: {name}'.format(name=tempfile.name))
+        logger.debug('Importing from csv file: {name}'.format(name=tempfile.name))
         cmds = "COPY ks.test_reading_text_pk_no_prepared_statements FROM '{name}' WITH PREPAREDSTATEMENTS=FALSE"\
             .format(name=tempfile.name)
         self.run_cqlsh(cmds=cmds)
@@ -3223,7 +3216,7 @@ class CqlshCopyTest(Tester):
             f.write(',,,a1,645e7d3c-aef7-4e3c-b834-24b792cf2e55,,,,r1\n')
 
         def _test(prepared_statements):
-            debug('Importing from csv file: {name}'.format(name=tempfile.name))
+            logger.debug('Importing from csv file: {name}'.format(name=tempfile.name))
             cmds = "COPY ks.test_many_empty_strings FROM '{}' WITH NULL='-' AND PREPAREDSTATEMENTS = {}"\
                 .format(tempfile.name, prepared_statements)
             self.run_cqlsh(cmds=cmds)
@@ -3286,24 +3279,24 @@ class CqlshCopyTest(Tester):
             self.session.execute("INSERT INTO testunusualdates (a, b) VALUES ({}, '{}')".format(a, b))
 
         results = list(self.session.execute("SELECT * FROM testunusualdates"))
-        self.assertItemsEqual(expected_results, rows_to_list(results))
+        assert expected_results == rows_to_list(results)
 
         tempfile = self.get_temp_file()
-        debug('Exporting to csv file: {}'.format(tempfile.name))
+        logger.debug('Exporting to csv file: {}'.format(tempfile.name))
         out, err, _ = self.run_cqlsh(cmds="COPY ks.testunusualdates TO '{}'".format(tempfile.name))
-        debug(out)
+        logger.debug(out)
 
         # check all records were exported
         self.assertCsvResultEqual(tempfile.name, results, 'testunusualdates')
 
         # import the CSV file with COPY FROM
         self.session.execute("TRUNCATE ks.testunusualdates")
-        debug('Importing from csv file: {}'.format(tempfile.name))
+        logger.debug('Importing from csv file: {}'.format(tempfile.name))
         out, err, _ = self.run_cqlsh(cmds="COPY ks.testunusualdates FROM '{}'".format(tempfile.name))
-        debug(out)
+        logger.debug(out)
 
         new_results = list(self.session.execute("SELECT * FROM testunusualdates"))
-        self.assertEquals(results, new_results)
+        assert results == new_results
 
     @since('3.0')
     def test_importing_invalid_data_for_collections(self):
@@ -3316,18 +3309,18 @@ class CqlshCopyTest(Tester):
 
         def _check(file_name, table_name, expected_results):
             # import the CSV file with COPY FROM
-            debug('Importing from csv file: {}'.format(file_name))
+            logger.debug('Importing from csv file: {}'.format(file_name))
             out, err, _ = self.run_cqlsh(cmds="COPY ks.{} FROM '{}'".format(table_name, file_name))
-            debug(out)
+            logger.debug(out)
 
-            self.assertIn('ParseError - Failed to parse', err)
+            assert 'ParseError - Failed to parse' in err
 
             results = rows_to_list(self.session.execute("SELECT * FROM {}".format(table_name)))
-            debug(results)
-            self.assertItemsEqual(expected_results, results)
+            logger.debug(results)
+            assert expected_results == results
 
         def _test_invalid_data_for_sets():
-            debug('Testing invalid data for sets')
+            logger.debug('Testing invalid data for sets')
             self.session.execute("""
                             CREATE TABLE testinvaliddataforsets (
                                 key text,
@@ -3347,7 +3340,7 @@ class CqlshCopyTest(Tester):
             _check(tempfile.name, 'testinvaliddataforsets', expected_results)
 
         def _test_invalid_data_for_lists():
-            debug('Testing invalid data for lists')
+            logger.debug('Testing invalid data for lists')
             self.session.execute("""
                             CREATE TABLE testinvaliddataforlists (
                                 key text,
@@ -3367,7 +3360,7 @@ class CqlshCopyTest(Tester):
             _check(tempfile.name, 'testinvaliddataforlists', expected_results)
 
         def _test_invalid_data_for_maps():
-            debug('Testing invalid data for maps')
+            logger.debug('Testing invalid data for maps')
             self.session.execute("""
                             CREATE TABLE testinvaliddataformaps (
                                 key text,


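The copy tests above share one failure-injection convention: the test serializes a failure spec as JSON into the CQLSH_COPY_TEST_FAILURES environment variable before invoking cqlsh, and the copy machinery inside cqlsh (not part of this diff) reads it back to decide which chunk to fail, crash, or stall on. Below is a minimal sketch of both sides of that handshake, with the consuming side reduced to a hypothetical helper:

    import json
    import os

    # producer side, as in the tests above: batch 30 should fail 5 times
    failures = {'failing_batch': {'id': 30, 'failures': 5}}
    os.environ['CQLSH_COPY_TEST_FAILURES'] = json.dumps(failures)

    # consumer side (hypothetical stand-in for the hook in cqlsh's copy code):
    # read the spec back and decide whether a given attempt should be failed
    spec = json.loads(os.environ.get('CQLSH_COPY_TEST_FAILURES', '{}'))

    def should_fail(batch_id, attempt):
        rule = spec.get('failing_batch')
        return bool(rule) and rule['id'] == batch_id and attempt < rule['failures']

    assert should_fail(30, 0)
    assert not should_fail(30, 5)   # attempts beyond the injected failures succeed

With MAXATTEMPTS='3' against five injected failures the import gives up ('Failed to process'), while three injected failures against MAXATTEMPTS=5 eventually succeed, which is exactly what the two tests assert.
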
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/replication_test.py
----------------------------------------------------------------------
diff --git a/replication_test.py b/replication_test.py
index edc6fc0..8c29074 100644
--- a/replication_test.py
+++ b/replication_test.py
@@ -1,14 +1,18 @@
 import os
 import re
 import time
+import pytest
+import logging
+
 from collections import defaultdict
 
 from cassandra import ConsistencyLevel
 from cassandra.query import SimpleStatement
-from nose.plugins.attrib import attr
 
-from dtest import PRINT_DEBUG, DtestTimeoutError, Tester, debug, create_ks
-from tools.decorators import no_vnodes, since
+from dtest import DtestTimeoutError, Tester, create_ks
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 TRACE_DETERMINE_REPLICAS = re.compile('Determining replicas for mutation')
 TRACE_SEND_MESSAGE = re.compile('Sending (?:MUTATION|REQUEST_RESPONSE) message to /([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)')
@@ -73,8 +77,8 @@ def block_on_trace(session):
             raise DtestTimeoutError()
 
 
-@no_vnodes()
-class ReplicationTest(Tester):
+@pytest.mark.no_vnodes
+class TestReplication(Tester):
     """
     This test suite looks at how data is replicated across a cluster
     and who the coordinator, replicas and forwarders involved are.
@@ -176,7 +180,7 @@ class ReplicationTest(Tester):
         elif strategy == 'NetworkTopologyStrategy':
             # NetworkTopologyStrategy can be broken down into multiple
             # SimpleStrategies, just once per datacenter:
-            for dc, rf in replication_factor.items():
+            for dc, rf in list(replication_factor.items()):
                 dc_nodes = [n for n in nodes if n.data_center == dc]
                 replicas.extend(self.get_replicas_for_token(
                     token, rf, nodes=dc_nodes))
@@ -190,13 +194,13 @@ class ReplicationTest(Tester):
         """
         Pretty print a trace
         """
-        if PRINT_DEBUG:
-            print("-" * 40)
+        if logging.root.level == logging.DEBUG:
+            print("-" * 40)
             for t in trace.events:
-                print("%s\t%s\t%s\t%s" % (t.source, t.source_elapsed, t.description, t.thread_name))
-            print("-" * 40)
+                print("%s\t%s\t%s\t%s" % (t.source, t.source_elapsed, t.description, t.thread_name))
+            print("-" * 40)
 
-    def simple_test(self):
+    def test_simple(self):
         """
         Test the SimpleStrategy on a 3 node cluster
         """
@@ -209,8 +213,8 @@ class ReplicationTest(Tester):
         create_ks(session, 'test', replication_factor)
         session.execute('CREATE TABLE test.test (id int PRIMARY KEY, value text)', trace=False)
 
-        for key, token in murmur3_hashes.items():
-            debug('murmur3 hash key={key},token={token}'.format(key=key, token=token))
+        for key, token in list(murmur3_hashes.items()):
+            logger.debug('murmur3 hash key={key},token={token}'.format(key=key, token=token))
             query = SimpleStatement("INSERT INTO test (id, value) VALUES ({}, 'asdf')".format(key), consistency_level=ConsistencyLevel.ALL)
             future = session.execute_async(query, trace=True)
             future.result()
@@ -222,17 +226,17 @@ class ReplicationTest(Tester):
             stats = self.get_replicas_from_trace(trace)
             replicas_should_be = set(self.get_replicas_for_token(
                 token, replication_factor))
-            debug('\nreplicas should be: %s' % replicas_should_be)
-            debug('replicas were: %s' % stats['replicas'])
+            logger.debug('\nreplicas should be: %s' % replicas_should_be)
+            logger.debug('replicas were: %s' % stats['replicas'])
 
             # Make sure the correct nodes are replicas:
-            self.assertEqual(stats['replicas'], replicas_should_be)
+            assert stats['replicas'] == replicas_should_be
             # Make sure that each replica node was contacted and
             # acknowledged the write:
-            self.assertEqual(stats['nodes_sent_write'], stats['nodes_responded_write'])
+            assert stats['nodes_sent_write'] == stats['nodes_responded_write']
 
-    @attr("resource-intensive")
-    def network_topology_test(self):
+    @pytest.mark.resource_intensive
+    def test_network_topology(self):
         """
         Test the NetworkTopologyStrategy on a 2DC 3:3 node cluster
         """
@@ -248,7 +252,7 @@ class ReplicationTest(Tester):
 
         forwarders_used = set()
 
-        for key, token in murmur3_hashes.items():
+        for key, token in list(murmur3_hashes.items()):
             query = SimpleStatement("INSERT INTO test (id, value) VALUES ({}, 'asdf')".format(key), consistency_level=ConsistencyLevel.ALL)
             future = session.execute_async(query, trace=True)
             future.result()
@@ -260,9 +264,9 @@ class ReplicationTest(Tester):
             stats = self.get_replicas_from_trace(trace)
             replicas_should_be = set(self.get_replicas_for_token(
                 token, replication_factor, strategy='NetworkTopologyStrategy'))
-            debug('Current token is %s' % token)
-            debug('\nreplicas should be: %s' % replicas_should_be)
-            debug('replicas were: %s' % stats['replicas'])
+            logger.debug('Current token is %s' % token)
+            logger.debug('\nreplicas should be: %s' % replicas_should_be)
+            logger.debug('replicas were: %s' % stats['replicas'])
 
             # Make sure the coordinator only talked to a single node in
             # the second datacenter - CASSANDRA-5632:
@@ -270,27 +274,27 @@ class ReplicationTest(Tester):
             for node_contacted in stats['nodes_contacted'][node1.address()]:
                 if ip_nodes[node_contacted].data_center != node1.data_center:
                     num_in_other_dcs_contacted += 1
-            self.assertEqual(num_in_other_dcs_contacted, 1)
+            assert num_in_other_dcs_contacted == 1
 
             # Record the forwarder used for each INSERT:
             forwarders_used = forwarders_used.union(stats['forwarders'])
 
             try:
                 # Make sure the correct nodes are replicas:
-                self.assertEqual(stats['replicas'], replicas_should_be)
+                assert stats['replicas'] == replicas_should_be
                 # Make sure that each replica node was contacted and
                 # acknowledged the write:
-                self.assertEqual(stats['nodes_sent_write'], stats['nodes_responded_write'])
+                assert stats['nodes_sent_write'] == stats['nodes_responded_write']
             except AssertionError as e:
-                debug("Failed on key %s and token %s." % (key, token))
+                logger.debug("Failed on key %s and token %s." % (key, token))
                 raise e
 
         # Given a diverse enough keyset, each node in the second
         # datacenter should get a chance to be a forwarder:
-        self.assertEqual(len(forwarders_used), 3)
+        assert len(forwarders_used) == 3
 
 
-class SnitchConfigurationUpdateTest(Tester):
+class TestSnitchConfigurationUpdate(Tester):
     """
     Test to reproduce CASSANDRA-10238, wherein changing snitch properties to change racks without a restart
     could violate RF contract.
@@ -299,27 +303,31 @@ class SnitchConfigurationUpdateTest(Tester):
     which nodes should be shutdown in order to have the rack changed.
     """
 
-    ignore_log_patterns = ["Fatal exception during initialization",
-                           "Cannot start node if snitch's rack(.*) differs from previous rack(.*)",
-                           "Cannot update data center or rack"]
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        fixture_dtest_setup.ignore_log_patterns = (
+            "Fatal exception during initialization",
+            "Cannot start node if snitch's rack(.*) differs from previous rack(.*)",
+            "Cannot update data center or rack"
+        )
 
     def check_endpoint_count(self, ks, table, nodes, rf):
         """
         Check a dummy key expecting it to have replication factor as the sum of rf on all dcs.
         """
-        expected_count = sum([int(r) for d, r in rf.iteritems() if d != 'class'])
+        expected_count = sum([int(r) for d, r in rf.items() if d != 'class'])
         for node in nodes:
             cmd = "getendpoints {} {} dummy".format(ks, table)
             out, err, _ = node.nodetool(cmd)
 
             if len(err.strip()) > 0:
-                debug("Error running 'nodetool {}': {}".format(cmd, err))
+                logger.debug("Error running 'nodetool {}': {}".format(cmd, err))
 
-            debug("Endpoints for node {}, expected count is {}".format(node.address(), expected_count))
-            debug(out)
+            logger.debug("Endpoints for node {}, expected count is {}".format(node.address(), expected_count))
+            logger.debug(out)
             ips_found = re.findall('(\d+\.\d+\.\d+\.\d+)', out)
 
-            self.assertEqual(len(ips_found), expected_count, "wrong number of endpoints found ({}), should be: {}".format(len(ips_found), expected_count))
+            assert len(ips_found) == expected_count, "wrong number of endpoints found ({}), should be: {}".format(len(ips_found), expected_count)
 
     def wait_for_nodes_on_racks(self, nodes, expected_racks):
         """
@@ -331,9 +339,9 @@ class SnitchConfigurationUpdateTest(Tester):
             while time.time() < wait_expire:
                 out, err, _ = node.nodetool("status")
 
-                debug(out)
+                logger.debug(out)
                 if len(err.strip()) > 0:
-                    debug("Error trying to run nodetool status: {}".format(err))
+                    logger.debug("Error trying to run nodetool status: {}".format(err))
 
                 racks = []
                 for line in out.split(os.linesep):
@@ -343,10 +351,10 @@ class SnitchConfigurationUpdateTest(Tester):
 
                 if racks == expected_racks:
                     # great, the topology change is propagated
-                    debug("Topology change detected on node {}".format(i))
+                    logger.debug("Topology change detected on node {}".format(i))
                     break
                 else:
-                    debug("Waiting for topology change on node {}".format(i))
+                    logger.debug("Waiting for topology change on node {}".format(i))
                     time.sleep(5)
             else:
                 raise RuntimeError("Ran out of time waiting for topology to change on node {}".format(i))
@@ -383,7 +391,7 @@ class SnitchConfigurationUpdateTest(Tester):
                                        final_racks=["rack0", "rack1", "rack2"],
                                        nodes_to_shutdown=[0, 2])
 
-    @attr("resource-intensive")
+    @pytest.mark.resource_intensive
     def test_rf_collapse_gossiping_property_file_snitch_multi_dc(self):
         """
         @jira_ticket CASSANDRA-10238
@@ -400,7 +408,7 @@ class SnitchConfigurationUpdateTest(Tester):
                                        final_racks=["rack1", "rack1", "rack1", "rack1", "rack1", "rack1"],
                                        nodes_to_shutdown=[0, 2, 3, 5])
 
-    @attr("resource-intensive")
+    @pytest.mark.resource_intensive
     def test_rf_expand_gossiping_property_file_snitch_multi_dc(self):
         """
         @jira_ticket CASSANDRA-10238
@@ -537,7 +545,7 @@ class SnitchConfigurationUpdateTest(Tester):
 
         session = self.patient_cql_connection(cluster.nodelist()[0])
 
-        options = (', ').join(['\'{}\': {}'.format(d, r) for d, r in rf.iteritems()])
+        options = (', ').join(['\'{}\': {}'.format(d, r) for d, r in rf.items()])
         session.execute("CREATE KEYSPACE testing WITH replication = {{{}}}".format(options))
         session.execute("CREATE TABLE testing.rf_test (key text PRIMARY KEY, value text)")
 
@@ -548,10 +556,10 @@ class SnitchConfigurationUpdateTest(Tester):
 
         for i in nodes_to_shutdown:
             node = cluster.nodelist()[i]
-            debug("Shutting down node {}".format(node.address()))
+            logger.debug("Shutting down node {}".format(node.address()))
             node.stop(wait_other_notice=True)
 
-        debug("Updating snitch file")
+        logger.debug("Updating snitch file")
         for i, node in enumerate(cluster.nodelist()):
             with open(os.path.join(node.get_conf_dir(), snitch_config_file), 'w') as topo_file:
                 for line in snitch_lines_after(i, node):
@@ -559,12 +567,12 @@ class SnitchConfigurationUpdateTest(Tester):
 
         # wait until the config is reloaded before we restart the nodes, the default check period is
         # 5 seconds so we wait for 10 seconds to be sure
-        debug("Waiting 10 seconds to make sure snitch file is reloaded...")
+        logger.debug("Waiting 10 seconds to make sure snitch file is reloaded...")
         time.sleep(10)
 
         for i in nodes_to_shutdown:
             node = cluster.nodelist()[i]
-            debug("Restarting node {}".format(node.address()))
+            logger.debug("Restarting node {}".format(node.address()))
             # Since CASSANDRA-10242 it is no longer
             # possible to start a node with a different rack unless we specify -Dcassandra.ignore_rack and since
             # CASSANDRA-9474 it is no longer possible to start a node with a different dc unless we specify
@@ -594,24 +602,24 @@ class SnitchConfigurationUpdateTest(Tester):
             for line in ["dc={}".format(node1.data_center), "rack=rack1"]:
                 topo_file.write(line + os.linesep)
 
-        debug("Starting node {} with rack1".format(node1.address()))
+        logger.debug("Starting node {} with rack1".format(node1.address()))
         node1.start(wait_for_binary_proto=True)
 
-        debug("Shutting down node {}".format(node1.address()))
+        logger.debug("Shutting down node {}".format(node1.address()))
         node1.stop(wait_other_notice=True)
 
-        debug("Updating snitch file with rack2")
+        logger.debug("Updating snitch file with rack2")
         for node in cluster.nodelist():
             with open(os.path.join(node.get_conf_dir(), 'cassandra-rackdc.properties'), 'w') as topo_file:
                 for line in ["dc={}".format(node.data_center), "rack=rack2"]:
                     topo_file.write(line + os.linesep)
 
-        debug("Restarting node {} with rack2".format(node1.address()))
+        logger.debug("Restarting node {} with rack2".format(node1.address()))
         mark = node1.mark_log()
         node1.start()
 
         # check node not running
-        debug("Waiting for error message in log file")
+        logger.debug("Waiting for error message in log file")
 
         if cluster.version() >= '2.2':
             node1.watch_log_for("Cannot start node if snitch's rack(.*) differs from previous rack(.*)",
@@ -696,7 +704,7 @@ class SnitchConfigurationUpdateTest(Tester):
 
         marks = [node.mark_log() for node in cluster.nodelist()]
 
-        debug("Updating snitch file")
+        logger.debug("Updating snitch file")
         for node in cluster.nodelist():
             with open(os.path.join(node.get_conf_dir(), snitch_config_file), 'w') as topo_file:
                 for line in snitch_lines_after:
@@ -704,7 +712,7 @@ class SnitchConfigurationUpdateTest(Tester):
 
         # wait until the config is reloaded, the default check period is
         # 5 seconds so we wait for 10 seconds to be sure
-        debug("Waiting 10 seconds to make sure snitch file is reloaded...")
+        logger.debug("Waiting 10 seconds to make sure snitch file is reloaded...")
         time.sleep(10)
 
         # check racks have not changed
@@ -723,7 +731,7 @@ class SnitchConfigurationUpdateTest(Tester):
         """
         expected_error = (r"Cannot start node if snitch's data center (.*) differs from previous data center (.*)\. "
                           "Please fix the snitch configuration, decommission and rebootstrap this node or use the flag -Dcassandra.ignore_dc=true.")
-        self.ignore_log_patterns = [expected_error]
+        self.fixture_dtest_setup.ignore_log_patterns = [expected_error]
 
         cluster = self.cluster
         cluster.populate(1)
@@ -744,4 +752,4 @@ class SnitchConfigurationUpdateTest(Tester):
 
         mark = node.mark_log()
         node.start()
-        node.watch_log_for(expected_error, from_mark=mark, timeout=10)
+        node.watch_log_for(expected_error, from_mark=mark, timeout=120)

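Note how the nose-era decorators map onto pytest marks in the hunk above: @no_vnodes() becomes @pytest.mark.no_vnodes on the class, and @attr("resource-intensive") becomes @pytest.mark.resource_intensive on individual tests. The selection logic that honors these marks lives in conftest.py, which is not part of this hunk; the following is only a sketch of how such a hook could deselect marked tests, assuming the --skip-resource-intensive-tests option is registered with pytest:

    import pytest

    # illustrative conftest.py-style hook, not the repo's actual implementation
    def pytest_collection_modifyitems(config, items):
        if config.getoption("--skip-resource-intensive-tests", default=False):
            skip = pytest.mark.skip(reason="resource_intensive tests disabled")
            for item in items:
                if "resource_intensive" in item.keywords:
                    item.add_marker(skip)
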
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/requirements.txt
----------------------------------------------------------------------
diff --git a/requirements.txt b/requirements.txt
index bf46e38..388a8a9 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,21 +1,18 @@
-# See python driver docs: futures and six have to be installed before
-# cythonizing the driver, perhaps only on old pips.
-# http://datastax.github.io/python-driver/installation.html#cython-based-extensions
-futures
-six
 -e git+https://github.com/datastax/python-driver.git@cassandra-test#egg=cassandra-driver
 # Used ccm version is tracked by cassandra-test branch in ccm repo. Please create a PR there for fixes or upgrades to new releases.
--e git+https://github.com/pcmanus/ccm.git@cassandra-test#egg=ccm
-cql
+-e git+https://github.com/riptano/ccm.git@cassandra-test#egg=ccm
+cqlsh
 decorator
 docopt
 enum34
 flaky
 mock
-nose
-nose-test-select
+pytest
+pytest-timeout
 parse
 pycodestyle
 psutil
-pycassa
-thrift==0.9.3
+thrift==0.10.0
+netifaces
+beautifulsoup4
+lxml

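Several of the new dependencies are imported under different names than they are installed as (beautifulsoup4 installs the bs4 module), so a quick sanity check after pip-installing this file might look like the following throwaway snippet, which is not part of the repo:

    import importlib

    # module names, not package names: beautifulsoup4 installs the bs4 module
    for mod in ("pytest", "flaky", "psutil", "netifaces", "bs4", "lxml"):
        importlib.import_module(mod)
    print("new dtest dependencies import cleanly")
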
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/run_dtests.py
----------------------------------------------------------------------
diff --git a/run_dtests.py b/run_dtests.py
index e7165af..198cde2 100755
--- a/run_dtests.py
+++ b/run_dtests.py
@@ -1,264 +1,280 @@
 #!/usr/bin/env python
 """
-Usage: run_dtests.py [--nose-options NOSE_OPTIONS] [TESTS...] [--vnodes VNODES_OPTIONS...]
-                 [--runner-debug | --runner-quiet] [--dry-run]
-
-nosetests options:
-    --nose-options NOSE_OPTIONS  specify options to pass to `nosetests`.
-    TESTS                        space-separated list of tests to pass to `nosetests`
-
-script configuration options:
-    --runner-debug -d            print debug statements in this script
-    --runner-quiet -q            quiet all output from this script
-
-cluster configuration options:
-    --vnodes VNODES_OPTIONS...   specify whether to run with or without vnodes.
-                                 valid values: 'true' and 'false'
-
-example:
-    The following command will execute nosetests with the '-v' (verbose) option, vnodes disabled, and run a single test:
-    ./run_dtests.py --nose-options -v --vnodes false repair_tests/repair_test.py:TestRepair.token_range_repair_test_with_cf
-
+usage: run_dtests.py [-h] [--use-vnodes] [--use-off-heap-memtables] [--num-tokens NUM_TOKENS] [--data-dir-count-per-instance DATA_DIR_COUNT_PER_INSTANCE] [--force-resource-intensive-tests]
+                     [--skip-resource-intensive-tests] [--cassandra-dir CASSANDRA_DIR] [--cassandra-version CASSANDRA_VERSION] [--delete-logs] [--execute-upgrade-tests] [--disable-active-log-watching]
+                     [--keep-test-dir] [--enable-jacoco-code-coverage] [--dtest-enable-debug-logging] [--dtest-print-tests-only] [--dtest-print-tests-output DTEST_PRINT_TESTS_OUTPUT]
+                     [--pytest-options PYTEST_OPTIONS] [--dtest-tests DTEST_TESTS]
+
+optional arguments:
+  -h, --help                                                 show this help message and exit
+  --use-vnodes                                               Determines whether or not to set up clusters using vnodes for tests (default: False)
+  --use-off-heap-memtables                                   Enable Off Heap Memtables when creating test clusters for tests (default: False)
+  --num-tokens NUM_TOKENS                                    Number of tokens to set num_tokens yaml setting to when creating instances with vnodes enabled (default: 256)
+  --data-dir-count-per-instance DATA_DIR_COUNT_PER_INSTANCE  Control the number of data directories to create per instance (default: 3)
+  --force-resource-intensive-tests                           Forces the execution of tests marked as resource_intensive (default: False)
+  --skip-resource-intensive-tests                            Skip all tests marked as resource_intensive (default: False)
+  --cassandra-dir CASSANDRA_DIR
+  --cassandra-version CASSANDRA_VERSION
+  --delete-logs
+  --execute-upgrade-tests                                    Execute Cassandra Upgrade Tests (e.g. tests annotated with the upgrade_test mark) (default: False)
+  --disable-active-log-watching                              Disable ccm active log watching, which will cause dtests to check for errors in the logs in a single operation instead of semi-realtime
+                                                             processing by consuming ccm _log_error_handler callbacks (default: False)
+  --keep-test-dir                                            Do not remove/cleanup the test ccm cluster directory and its artifacts after the test completes (default: False)
+  --enable-jacoco-code-coverage                              Enable JaCoCo Code Coverage Support (default: False)
+  --dtest-enable-debug-logging                               Enable debug logging (for this script, pytest, and during execution of test functions) (default: False)
+  --dtest-print-tests-only                                   Print list of all tests found eligible for execution given the provided options. (default: False)
+  --dtest-print-tests-output DTEST_PRINT_TESTS_OUTPUT        Path to a file where the output of --dtest-print-tests-only should be written (default: False)
+  --pytest-options PYTEST_OPTIONS                            Additional command line arguments to proxy directly through when invoking pytest. (default: None)
+  --dtest-tests DTEST_TESTS                                  Comma separated list of test files, test classes, or test methods to execute. (default: None)
 """
-from __future__ import print_function
-
 import subprocess
 import sys
 import os
-from collections import namedtuple
-from itertools import product
-from os import getcwd, environ
-from tempfile import NamedTemporaryFile
-
-from docopt import docopt
-
-from plugins.dtestconfig import GlobalConfigObject
-
-
-# Generate values in a matrix from these lists of values for each attribute
-# not defined in arguments to the runner script.
-default_config_matrix = GlobalConfigObject(
-    vnodes=(True, False),
-)
-
+import re
+import logging
 
-def _noop(*args, **kwargs):
-    pass
-
-
-class ValidationResult(namedtuple('_ValidationResult', ['serialized', 'error_messages'])):
-    """
-    A value to be returned from validation functions. If serialization works,
-    return one with 'serialized' set, otherwise return a list of string on the
-    'error_messages' attribute.
-    """
-    __slots__ = ()
+from os import getcwd
+from tempfile import NamedTemporaryFile
+from bs4 import BeautifulSoup
 
-    def __new__(cls, serialized=None, error_messages=None):
-        if error_messages is None:
-            error_messages = []
+from _pytest.config import Parser
+import argparse
 
-        success_result = serialized is not None
-        failure_result = bool(error_messages)
+from conftest import pytest_addoption
 
-        if success_result + failure_result != 1:
-            msg = ('attempted to instantiate a {cls_name} with serialized='
-                   '{serialized} and error_messages={error_messages}. {cls_name} '
-                   'objects must be instantiated with either a serialized or '
-                   'error_messages argument, but not both.')
-            msg = msg.format(cls_name=cls.__name__,
-                             serialized=serialized,
-                             error_messages=error_messages)
-            raise ValueError(msg)
+logger = logging.getLogger(__name__)
 
-        return super(ValidationResult, cls).__new__(cls, serialized=serialized, error_messages=error_messages)
 
+class RunDTests():
+    def run(self, argv):
+        parser = argparse.ArgumentParser(formatter_class=lambda prog: argparse.ArgumentDefaultsHelpFormatter(prog,
+                                                                                                             max_help_position=100,
+                                                                                                             width=200))
 
-def _validate_and_serialize_vnodes(vnodes_value):
-    """
-    Validate the values received for vnodes configuration. Returns a
-    ValidationResult.
+        # this is a bit ugly: all of our command line arguments are added and configured as part
+        # of pytest. however, we also have this wrapper script to make it easier for those who
+        # aren't comfortable calling pytest directly. To avoid duplicating code (i.e. having the options
+        # in two separate places) we directly use the pytest_addoption hook from conftest.py. Unfortunately,
+        # pytest wraps ArgumentParser, so, first we add the options to a pytest Parser, and then we pull
+        # all of those custom options out and add them to the unwrapped ArgumentParser we want to use
+        # here inside of run_dtests.py.
+        #
+        # So NOTE: if you're trying to add a command line argument by adding it here, you're doing it wrong!
+        # Add it to conftest.py:pytest_addoption instead.
+        pytest_parser = Parser()
+        pytest_addoption(pytest_parser)
+
+        # add all of the options from the pytest Parser we created, and add them into our ArgumentParser instance
+        pytest_custom_opts = pytest_parser._anonymous
+        for opt in pytest_custom_opts.options:
+            parser.add_argument(opt._long_opts[0], action=opt._attrs['action'],
+                                default=opt._attrs.get('default', None),
+                                help=opt._attrs.get('help', None))
+
+        parser.add_argument("--dtest-enable-debug-logging", action="store_true", default=False,
+                            help="Enable debug logging (for this script, pytest, and during execution "
+                                 "of test functions)")
+        parser.add_argument("--dtest-print-tests-only", action="store_true", default=False,
+                            help="Print list of all tests found eligible for execution given the provided options.")
+        parser.add_argument("--dtest-print-tests-output", action="store", default=False,
+                            help="Path to file where the output of --dtest-print-tests-only should be written to")
+        parser.add_argument("--pytest-options", action="store", default=None,
+                            help="Additional command line arguments to proxy directly thru when invoking pytest.")
+        parser.add_argument("--dtest-tests", action="store", default=None,
+                            help="Comma separated list of test files, test classes, or test methods to execute.")
+
+        args = parser.parse_args()
+
+        if not args.dtest_print_tests_only and args.cassandra_dir is None:
+            if args.cassandra_version is None:
+                raise Exception("Required dtest arguments were missing! You must provide either --cassandra-dir "
+                                "or --cassandra-version. Refer to the documentation or invoke the help with --help.")
+
+        if args.dtest_enable_debug_logging:
+            logging.root.setLevel(logging.DEBUG)
+            logger.setLevel(logging.DEBUG)
+
+        # log the parsed arguments that will drive the pytest invocation
+        logger.debug('Running with the following arguments:\n\t{}'.format(args))
+
+        args_to_invoke_pytest = []
+        if args.pytest_options:
+            for arg in args.pytest_options.split(" "):
+                args_to_invoke_pytest.append("'{the_arg}'".format(the_arg=arg))
+
+        for arg in argv:
+            if arg.startswith("--pytest-options") or arg.startswith("--dtest-"):
+                continue
+            args_to_invoke_pytest.append("'{the_arg}'".format(the_arg=arg))
+
+        if args.dtest_print_tests_only:
+            args_to_invoke_pytest.append("'--collect-only'")
+
+        if args.dtest_tests:
+            for test in args.dtest_tests.split(","):
+                args_to_invoke_pytest.append("'{test_name}'".format(test_name=test))
+
+        original_raw_cmd_args = ", ".join(args_to_invoke_pytest)
+
+        logger.debug("args to call with: [%s]" % original_raw_cmd_args)
+
+        # the original run_dtests.py script did it like this to hack around nosetests
+        # limitations -- it's not clear whether those still apply in a pytest world,
+        # but for now it's left as is, because it does the job (although the code is
+        # certainly still pretty complicated and has a hacky feeling)
+        to_execute = (
+            "import pytest\n"
+            "pytest.main([{options}])\n"
+        ).format(options=original_raw_cmd_args)
+        temp = NamedTemporaryFile(dir=getcwd())
+        logger.debug('Writing the following to {}:'.format(temp.name))
 
-    If the values validate, return a ValidationResult with 'serialized' set to
-    the equivalent of:
+        logger.debug('```\n{to_execute}```\n'.format(to_execute=to_execute))
+        temp.write(to_execute.encode("utf-8"))
+        temp.flush()
 
-        tuple(set({'true': True, 'false':False}[v.lower()] for v in vnodes_value))
+        # We pass the arguments to pytest.main() inside the generated script
+        # rather than on the command line. Arguments passed in via the
+        # command line are treated one way, args passed in as
+        # pytest.main(args=...) are treated another. Compare with the options
+        # -xsv for an example.
+        cmd_list = [sys.executable, temp.name]
+        logger.debug('subprocess.call-ing {cmd_list}'.format(cmd_list=cmd_list))
 
-    If the values don't validate, return a ValidationResult with 'messages' set
-    to a list of strings, each of which points out an invalid value.
-    """
-    messages = []
-    vnodes_value = set(v.lower() for v in vnodes_value)
-    value_map = {'true': True, 'false': False}
+        sp = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ.copy())
 
-    for v in vnodes_value:
-        if v not in value_map:
-            messages.append('{} not a valid value for --vnodes option. '
-                            'valid values are {} (case-insensitive)'.format(v, ', '.join(list(value_map))))
+        if args.dtest_print_tests_only:
+            stdout, stderr = sp.communicate()
 
-    if messages:
-        return ValidationResult(error_messages=messages)
+            if stderr:
+                print(stderr.decode("utf-8"))
+                result = sp.returncode
+                exit(result)
 
-    serialized = tuple({value_map[v] for v in vnodes_value})
-    return ValidationResult(serialized=serialized)
+            all_collected_test_modules = collect_test_modules(stdout)
+            joined_test_modules = "\n".join(all_collected_test_modules)
+            if args.dtest_print_tests_output:
+                with open(args.dtest_print_tests_output, "w") as collected_tests_output_file:
+                    collected_tests_output_file.write(joined_test_modules)
 
+            print(joined_test_modules)
+        else:
+            while True:
+                stdout_output = sp.stdout.readline()
+                stdout_output_str = stdout_output.decode("utf-8")
+                if stdout_output_str == '' and sp.poll() is not None:
+                    break
+                if stdout_output_str:
+                    print(stdout_output_str.strip())
 
-def validate_and_serialize_options(docopt_options):
-    """
-    For each value that should be configured for a config object, attempt to
-    serialize the passed-in strings into objects that can be used for
-    configuration. If no values were passed in, use the list of options from
-    the defaults above.
+                stderr_output = sp.stderr.readline()
+                stderr_output_str = stderr_output.decode("utf-8")
+                if stderr_output_str == '' and sp.poll() is not None:
+                    break
+                if stderr_output_str:
+                    print(stderr_output_str.strip())
 
-    Raises a ValueError and prints an error message if any values are invalid
-    or didn't serialize correctly.
-    """
-    vnodes = _validate_and_serialize_vnodes(docopt_options['--vnodes'])
-    if vnodes.error_messages:
-        raise ValueError('Validation error:\n{}'.format('\t\n'.join(list(vnodes.error_messages))))
-    return GlobalConfigObject(
-        vnodes=vnodes.serialized or default_config_matrix.vnodes
-    )
+        exit(sp.returncode)
 
 
-def product_of_values(d):
+def collect_test_modules(stdout):
     """
-    Transforms a dictionary of {key: list(configuration_options} into a tuple
-    of dictionaries, each corresponding to a point in the product, with the
-    values preserved at the keys where they were found in the argument.
-
-    This is difficult to explain and is probably best demonstrated with an
-    example:
-
-        >>> from pprint import pprint
-        >>> from runner import product_of_values
-        >>> pprint(product_of_values(
-        ...     {'a': [1, 2, 3],
-        ...      'b': [4, 5, 6]}
-        ... ))
-        ({'a': 1, 'b': 4},
-         {'a': 1, 'b': 5},
-         {'a': 1, 'b': 6},
-         {'a': 2, 'b': 4},
-         {'a': 2, 'b': 5},
-         {'a': 2, 'b': 6},
-         {'a': 3, 'b': 4},
-         {'a': 3, 'b': 5},
-         {'a': 3, 'b': 6})
-
-    So, in this case, we get something like
-
-        for a_value in d['a']:
-            for b_value in d['b']:
-                yield {'a': a_value, 'b': b_value}
-
-    This method does that, but for dictionaries with arbitrary iterables at
-    arbitrary numbers of keys.
+    Takes the xml-ish --collect-only output as printed by pytest to stdout (it is not actually valid xml,
+    so it needs some massaging first) and normalizes it to get a list of all collected tests in a human-friendly format
+    :param stdout: the stdout from pytest (should have been invoked with the --collect-only cmdline argument)
+    :return: a formatted list of collected test modules in format test_file.py::TestClass::test_function
     """
-
-    # transform, e.g., {'a': [1, 2, 3], 'b': [4, 5, 6]} into
-    # [[('a', 1), ('a', 2), ('a', 3)],
-    #  [('b', 4), ('b', 5), ('b', 6)]]
-    tuple_list = [[(k, v) for v in v_list] for k, v_list in d.items()]
-
-    # return the cartesian product of the flattened dict
-    return tuple(dict(result) for result in product(*tuple_list))
+    # unfortunately, pytest emits xml-like output -- but it's not actually xml, so we'll fail to parse
+    # if we try. the first step is to fix up the pytest output to create well-formed xml
+    xml_line_regex_pattern = re.compile(r"^(\s*)<(Module|Class|Function|Instance) '(.*)'>")
+    is_first_module = True
+    is_first_class = True
+    has_closed_class = False
+    section_has_instance = False
+    section_has_class = False
+    test_collect_xml_lines = []
+
+    test_collect_xml_lines.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>")
+    test_collect_xml_lines.append("<Modules>")
+    for line in stdout.decode("utf-8").split('\n'):
+        re_ret = re.search(xml_line_regex_pattern, line)
+        if re_ret:
+            if not is_first_module and re_ret.group(2) == "Module":
+                if section_has_instance:
+                    test_collect_xml_lines.append("      </Instance>")
+                if section_has_class:
+                    test_collect_xml_lines.append("    </Class>")
+
+                test_collect_xml_lines.append("  </Module>")
+                is_first_class = True
+                has_closed_class = False
+                section_has_instance = False
+                section_has_class = False
+                is_first_module = False
+            elif is_first_module and re_ret.group(2) == "Module":
+                if not has_closed_class and section_has_instance:
+                    test_collect_xml_lines.append("      </Instance>")
+                if not has_closed_class and section_has_class:
+                    test_collect_xml_lines.append("    </Class>")
+
+                is_first_class = True
+                is_first_module = False
+                has_closed_class = False
+                section_has_instance = False
+                section_has_class = False
+            elif re_ret.group(2) == "Instance":
+                section_has_instance = True
+            elif not is_first_class and re_ret.group(2) == "Class":
+                if section_has_instance:
+                    test_collect_xml_lines.append("      </Instance>")
+                if section_has_class:
+                    test_collect_xml_lines.append("    </Class>")
+                has_closed_class = True
+                section_has_class = True
+            elif re_ret.group(2) == "Class":
+                is_first_class = False
+                section_has_class = True
+                has_closed_class = False
+
+            if re_ret.group(2) == "Function":
+                test_collect_xml_lines.append("          <Function name=\"{name}\"></Function>"
+                                              .format(name=re_ret.group(3)))
+            elif re_ret.group(2) == "Class":
+                test_collect_xml_lines.append("    <Class name=\"{name}\">".format(name=re_ret.group(3)))
+            elif re_ret.group(2) == "Module":
+                test_collect_xml_lines.append("  <Module name=\"{name}\">".format(name=re_ret.group(3)))
+            elif re_ret.group(2) == "Instance":
+                test_collect_xml_lines.append("      <Instance name=\"\">".format(name=re_ret.group(3)))
+            else:
+                test_collect_xml_lines.append(line)
+
+    test_collect_xml_lines.append("      </Instance>")
+    test_collect_xml_lines.append("    </Class>")
+    test_collect_xml_lines.append("  </Module>")
+    test_collect_xml_lines.append("</Modules>")
+
+    all_collected_test_modules = []
+
+    # parse the now well-formed xml
+    test_collect_xml = BeautifulSoup("\n".join(test_collect_xml_lines), "lxml-xml")
+
+    # find all Modules (followed by classes in those modules, and then finally functions)
+    for pytest_module in test_collect_xml.findAll("Module"):
+        for test_class_name in pytest_module.findAll("Class"):
+            for function_name in test_class_name.findAll("Function"):
+                # adds to test list in format like test_file.py::TestClass::test_function for every test function found
+                all_collected_test_modules.append("{module_name}::{class_name}::{function_name}"
+                                                  .format(module_name=pytest_module.attrs['name'],
+                                                          class_name=test_class_name.attrs['name'],
+                                                          function_name=function_name.attrs['name']))
+
+    return all_collected_test_modules
 
 
 if __name__ == '__main__':
-    options = docopt(__doc__)
-    validated_options = validate_and_serialize_options(options)
-
-    nose_options = options['--nose-options'] or ''
-    nose_option_list = nose_options.split()
-    test_list = options['TESTS']
-    nose_argv = nose_option_list + test_list
-
-    verbosity = 1  # default verbosity level
-    if options['--runner-debug']:
-        verbosity = 2
-    if options['--runner-quiet']:  # --debug and --quiet are mutually exclusive, enforced by docopt
-        verbosity = 0
-
-    debug = print if verbosity >= 2 else _noop
-    output = print if verbosity >= 1 else _noop
-
-    # Get dictionaries corresponding to each point in the configuration matrix
-    # we want to run, then generate a config object for each of them.
-    debug('Generating configurations from the following matrix:\n\t{}'.format(validated_options))
-    all_configs = tuple(GlobalConfigObject(**d) for d in
-                        product_of_values(validated_options._asdict()))
-    output('About to run nosetests with config objects:\n'
-           '\t{configs}\n'.format(configs='\n\t'.join(map(repr, all_configs))))
-
-    results = []
-    for config in all_configs:
-        # These properties have to hold if we want to evaluate their reprs
-        # below in the generated file.
-        assert eval(repr(config), {'GlobalConfigObject': GlobalConfigObject}, {}) == config
-        assert eval(repr(nose_argv), {}, {}) == nose_argv
-
-        output('Running dtests with config object {}'.format(config))
-
-        # Generate a file that runs nose, passing in config as the
-        # configuration object.
-        #
-        # Yes, this is icky. The reason we do it is because we're dealing with
-        # global configuration. We've decided global, nosetests-run-level
-        # configuration is the way to go. This means we don't want to call
-        # nose.main() multiple times in the same Python interpreter -- I have
-        # not yet found a way to re-execute modules (thus getting new
-        # module-level configuration) for each call. This didn't even work for
-        # me with exec(script, {}, {}). So, here we are.
-        #
-        # How do we execute code in a new interpreter each time? Generate the
-        # code as text, then shell out to a new interpreter.
-        to_execute = (
-            "import nose\n" +
-            "from plugins.dtestconfig import DtestConfigPlugin, GlobalConfigObject\n" +
-            "from plugins.dtestxunit import DTestXunit\n" +
-            "from plugins.dtesttag import DTestTag\n" +
-            "from plugins.dtestcollect import DTestCollect\n" +
-            "import sys\n" +
-            "print sys.getrecursionlimit()\n" +
-            "print sys.setrecursionlimit(8000)\n" +
-            ("nose.main(addplugins=[DtestConfigPlugin({config}), DTestXunit(), DTestCollect(), DTestTag()])\n" if "TEST_TAG" in environ else "nose.main(addplugins=[DtestConfigPlugin({config}), DTestCollect(), DTestXunit()])\n")
-        ).format(config=repr(config))
-        temp = NamedTemporaryFile(dir=getcwd())
-        debug('Writing the following to {}:'.format(temp.name))
-
-        debug('```\n{to_execute}```\n'.format(to_execute=to_execute))
-        temp.write(to_execute)
-        temp.flush()
-
-        # We pass nose_argv as options to the python call to maintain
-        # compatibility with the nosetests command. Arguments passed in via the
-        # command line are treated one way, args passed in as
-        # nose.main(argv=...) are treated another. Compare with the options
-        # -xsv for an example.
-        cmd_list = [sys.executable, temp.name] + nose_argv
-        debug('subprocess.call-ing {cmd_list}'.format(cmd_list=cmd_list))
-
-        if options['--dry-run']:
-            print('Would run the following command:\n\t{}'.format(cmd_list))
-            with open(temp.name, 'r') as f:
-                contents = f.read()
-            print('{temp_name} contains:\n```\n{contents}```\n'.format(
-                temp_name=temp.name,
-                contents=contents
-            ))
-        else:
-            results.append(subprocess.call(cmd_list, env=os.environ.copy()))
-        # separate the end of the last subprocess.call output from the
-        # beginning of the next by printing a newline.
-        print()
-
-    # If this answer:
-    # http://stackoverflow.com/a/21788998/3408454
-    # is to be believed, nosetests will exit with 0 on success, 1 on test or
-    # other failure, and 2 on printing usage. We'll just grab the max of the
-    # runs we saw -- if one printed usage, the whole run "printed usage", if
-    # none printed usage, and one or more failed, we failed, else success.
-    if not results:
-        results = [0]
-    exit(max(results))
+    RunDTests().run(sys.argv[1:])
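
A minimal sketch of how collect_test_modules() is expected to be driven; the direct
pytest invocation below is an assumption for illustration (in this commit the actual
wiring lives in RunDTests), not code from the repository:

    import subprocess
    import sys

    # assuming the new runner module is importable as run_dtests
    from run_dtests import collect_test_modules

    # Ask pytest to collect tests without running them; collect_test_modules()
    # repairs the XML-ish report and flattens it into test ids.
    sp = subprocess.run([sys.executable, "-m", "pytest", "--collect-only"],
                        stdout=subprocess.PIPE)
    for test_id in collect_test_modules(sp.stdout):
        print(test_id)  # e.g. schema_test.py::TestSchema::test_table_alteration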

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/schema_metadata_test.py
----------------------------------------------------------------------
diff --git a/schema_metadata_test.py b/schema_metadata_test.py
index baf8b5a..fdfcf56 100644
--- a/schema_metadata_test.py
+++ b/schema_metadata_test.py
@@ -1,10 +1,13 @@
+import pytest
+import logging
+
 from collections import defaultdict
 from uuid import uuid4
 
-from nose.tools import assert_equal, assert_in
+from dtest import Tester, create_ks
 
-from dtest import Tester, debug, create_ks
-from tools.decorators import since
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 def establish_durable_writes_keyspace(version, session, table_name_prefix=""):
@@ -32,11 +35,10 @@ def verify_durable_writes_keyspace(created_on_version, current_version, keyspace
         "durable_writes_true": True,
         "durable_writes_false": False
     }
-    for keyspace, is_durable in expected.iteritems():
+    for keyspace, is_durable in expected.items():
         keyspace_name = _cql_name_builder(table_name_prefix, keyspace)
         meta = session.cluster.metadata.keyspaces[keyspace_name]
-        assert_equal(is_durable, meta.durable_writes,
-                     "keyspace [{}] had durable_writes of [{}] should be [{}]".format(keyspace_name, meta.durable_writes, is_durable))
+        assert is_durable == meta.durable_writes, "keyspace [{}] had durable_writes of [{}] should be [{}]".format(keyspace_name, meta.durable_writes, is_durable)
 
 
 def establish_indexes_table(version, session, table_name_prefix=""):
@@ -53,29 +55,29 @@ def establish_indexes_table(version, session, table_name_prefix=""):
 
     session.execute(cql.format(table_name))
     index_name = _cql_name_builder("idx_" + table_name_prefix, table_name)
-    debug("table name: [{}], index name: [{}], prefix: [{}]".format(table_name, index_name, table_name_prefix))
+    logger.debug("table name: [{}], index name: [{}], prefix: [{}]".format(table_name, index_name, table_name_prefix))
     session.execute("CREATE INDEX {0} ON {1}( d )".format(index_name, table_name))
 
 
 def verify_indexes_table(created_on_version, current_version, keyspace, session, table_name_prefix=""):
     table_name = _cql_name_builder(table_name_prefix, "test_indexes")
     index_name = _cql_name_builder("idx_" + table_name_prefix, table_name)
-    debug("table name: [{}], index name: [{}], prefix: [{}]".format(table_name, index_name, table_name_prefix))
+    logger.debug("table name: [{}], index name: [{}], prefix: [{}]".format(table_name, index_name, table_name_prefix))
     meta = session.cluster.metadata.keyspaces[keyspace].indexes[index_name]
 
-    assert_equal('d', meta.index_options['target'])
+    assert 'd' == meta.index_options['target']
 
     meta = session.cluster.metadata.keyspaces[keyspace].tables[table_name]
-    assert_equal(1, len(meta.clustering_key))
-    assert_equal('c', meta.clustering_key[0].name)
+    assert 1 == len(meta.clustering_key)
+    assert 'c' == meta.clustering_key[0].name
 
-    assert_equal(1, len(meta.indexes))
+    assert 1 == len(meta.indexes)
 
-    assert_equal({'target': 'd'}, meta.indexes[index_name].index_options)
-    assert_equal(3, len(meta.primary_key))
-    assert_equal('a', meta.primary_key[0].name)
-    assert_equal('b', meta.primary_key[1].name)
-    assert_equal('c', meta.primary_key[2].name)
+    assert {'target': 'd'} == meta.indexes[index_name].index_options
+    assert 3 == len(meta.primary_key)
+    assert 'a' == meta.primary_key[0].name
+    assert 'b' == meta.primary_key[1].name
+    assert 'c' == meta.primary_key[2].name
 
 
 def establish_clustering_order_table(version, session, table_name_prefix=""):
@@ -96,13 +98,13 @@ def establish_clustering_order_table(version, session, table_name_prefix=""):
 def verify_clustering_order_table(created_on_version, current_version, keyspace, session, table_name_prefix=""):
     table_name = _cql_name_builder(table_name_prefix, "test_clustering_order")
     meta = session.cluster.metadata.keyspaces[keyspace].tables[table_name]
-    assert_equal(0, len(meta.indexes))
-    assert_equal(2, len(meta.primary_key))
-    assert_equal('event_type', meta.primary_key[0].name)
-    assert_equal('insertion_time', meta.primary_key[1].name)
-    assert_equal(1, len(meta.clustering_key))
-    assert_equal('insertion_time', meta.clustering_key[0].name)
-    assert_in('insertion_time DESC', meta.as_cql_query())
+    assert 0 == len(meta.indexes)
+    assert 2 == len(meta.primary_key)
+    assert 'event_type' == meta.primary_key[0].name
+    assert 'insertion_time' == meta.primary_key[1].name
+    assert 1 == len(meta.clustering_key)
+    assert 'insertion_time' == meta.clustering_key[0].name
+    assert 'insertion_time DESC' in meta.as_cql_query()
 
 
 def establish_compact_storage_table(version, session, table_name_prefix=""):
@@ -123,16 +125,16 @@ def establish_compact_storage_table(version, session, table_name_prefix=""):
 def verify_compact_storage_table(created_on_version, current_version, keyspace, session, table_name_prefix=""):
     table_name = _cql_name_builder(table_name_prefix, "test_compact_storage")
     meta = session.cluster.metadata.keyspaces[keyspace].tables[table_name]
-    assert_equal(3, len(meta.columns))
-    assert_equal(2, len(meta.primary_key))
-    assert_equal(1, len(meta.clustering_key))
-    assert_equal('sub_block_id', meta.clustering_key[0].name)
-    assert_equal('block_id', meta.primary_key[0].name)
-    assert_equal('uuid', meta.primary_key[0].cql_type)
-    assert_equal('sub_block_id', meta.primary_key[1].name)
-    assert_equal('int', meta.primary_key[1].cql_type)
-    assert_equal(1, len(meta.clustering_key))
-    assert_equal('sub_block_id', meta.clustering_key[0].name)
+    assert 3 == len(meta.columns)
+    assert 2 == len(meta.primary_key)
+    assert 1 == len(meta.clustering_key)
+    assert 'sub_block_id' == meta.clustering_key[0].name
+    assert 'block_id' == meta.primary_key[0].name
+    assert 'uuid' == meta.primary_key[0].cql_type
+    assert 'sub_block_id' == meta.primary_key[1].name
+    assert 'int' == meta.primary_key[1].cql_type
+    assert 1 == len(meta.clustering_key)
+    assert 'sub_block_id' == meta.clustering_key[0].name
 
 
 def establish_compact_storage_composite_table(version, session, table_name_prefix=""):
@@ -153,19 +155,19 @@ def establish_compact_storage_composite_table(version, session, table_name_prefi
 def verify_compact_storage_composite_table(created_on_version, current_version, keyspace, session, table_name_prefix=""):
     table_name = _cql_name_builder(table_name_prefix, "test_compact_storage_composite")
     meta = session.cluster.metadata.keyspaces[keyspace].tables[table_name]
-    assert_equal(4, len(meta.columns))
-    assert_equal(3, len(meta.primary_key))
-    assert_equal('key', meta.primary_key[0].name)
-    assert_equal('text', meta.primary_key[0].cql_type)
-    assert_equal('column1', meta.primary_key[1].name)
-    assert_equal('int', meta.primary_key[1].cql_type)
-    assert_equal('column2', meta.primary_key[2].name)
-    assert_equal('int', meta.primary_key[2].cql_type)
-    assert_equal(2, len(meta.clustering_key))
-    assert_equal('column1', meta.clustering_key[0].name)
-    assert_equal('int', meta.clustering_key[0].cql_type)
-    assert_equal('column2', meta.clustering_key[1].name)
-    assert_equal('int', meta.clustering_key[1].cql_type)
+    assert 4 == len(meta.columns)
+    assert 3 == len(meta.primary_key)
+    assert 'key' == meta.primary_key[0].name
+    assert 'text' == meta.primary_key[0].cql_type
+    assert 'column1' == meta.primary_key[1].name
+    assert 'int' == meta.primary_key[1].cql_type
+    assert 'column2' == meta.primary_key[2].name
+    assert 'int' == meta.primary_key[2].cql_type
+    assert 2 == len(meta.clustering_key)
+    assert 'column1' == meta.clustering_key[0].name
+    assert 'int' == meta.clustering_key[0].cql_type
+    assert 'column2' == meta.clustering_key[1].name
+    assert 'int' == meta.clustering_key[1].cql_type
 
 
 def establish_nondefault_table_settings(version, session, table_name_prefix=""):
@@ -213,45 +215,45 @@ def verify_nondefault_table_settings(created_on_version, current_version, keyspa
     table_name = _cql_name_builder(table_name_prefix, "test_nondefault_settings")
     meta = session.cluster.metadata.keyspaces[keyspace].tables[table_name]
 
-    assert_equal('insightful information', meta.options['comment'])
-    assert_equal(0.88, meta.options['dclocal_read_repair_chance'])
-    assert_equal(9999, meta.options['gc_grace_seconds'])
-    assert_equal(0.99, meta.options['read_repair_chance'])
-    assert_equal(0.5, meta.options['bloom_filter_fp_chance'])
+    assert 'insightful information' == meta.options['comment']
+    assert 0.88 == meta.options['dclocal_read_repair_chance']
+    assert 9999 == meta.options['gc_grace_seconds']
+    assert 0.99 == meta.options['read_repair_chance']
+    assert 0.5 == meta.options['bloom_filter_fp_chance']
 
     if created_on_version >= '2.1':
-        assert_equal(86400, meta.options['default_time_to_live'])
-        assert_equal(1, meta.options['min_index_interval'])
-        assert_equal(20, meta.options['max_index_interval'])
+        assert 86400 == meta.options['default_time_to_live']
+        assert 1 == meta.options['min_index_interval']
+        assert 20 == meta.options['max_index_interval']
 
     if created_on_version >= '3.0':
-        assert_equal('55PERCENTILE', meta.options['speculative_retry'])
-        assert_equal(2121, meta.options['memtable_flush_period_in_ms'])
+        assert '55PERCENTILE' == meta.options['speculative_retry']
+        assert 2121 == meta.options['memtable_flush_period_in_ms']
 
     if current_version >= '3.0':
-        assert_equal('org.apache.cassandra.io.compress.DeflateCompressor', meta.options['compression']['class'])
-        assert_equal('128', meta.options['compression']['chunk_length_in_kb'])
-        assert_equal('org.apache.cassandra.db.compaction.LeveledCompactionStrategy', meta.options['compaction']['class'])
+        assert 'org.apache.cassandra.io.compress.DeflateCompressor' == meta.options['compression']['class']
+        assert '128' == meta.options['compression']['chunk_length_in_kb']
+        assert 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy' == meta.options['compaction']['class']
 
     if '2.1' <= current_version < '3.0':
-        assert_equal('{"keys":"NONE", "rows_per_partition":"ALL"}', meta.options['caching'])
-        assert_in('"chunk_length_kb":"128"', meta.options['compression_parameters'])
-        assert_in('"sstable_compression":"org.apache.cassandra.io.compress.DeflateCompressor"', meta.options['compression_parameters'])
+        assert '{"keys":"NONE", "rows_per_partition":"ALL"}' == meta.options['caching']
+        assert '"chunk_length_kb":"128"' in meta.options['compression_parameters']
+        assert '"sstable_compression":"org.apache.cassandra.io.compress.DeflateCompressor"' in meta.options['compression_parameters']
     elif current_version >= '3.0':
-        assert_equal('NONE', meta.options['caching']['keys'])
-        assert_equal('ALL', meta.options['caching']['rows_per_partition'])
-        assert_equal('org.apache.cassandra.io.compress.DeflateCompressor', meta.options['compression']['class'])
-        assert_equal('128', meta.options['compression']['chunk_length_in_kb'])
-        assert_equal('org.apache.cassandra.db.compaction.LeveledCompactionStrategy', meta.options['compaction']['class'])
+        assert 'NONE' == meta.options['caching']['keys']
+        assert 'ALL' == meta.options['caching']['rows_per_partition']
+        assert 'org.apache.cassandra.io.compress.DeflateCompressor' == meta.options['compression']['class']
+        assert '128' == meta.options['compression']['chunk_length_in_kb']
+        assert 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy' == meta.options['compaction']['class']
     else:
-        assert_equal('ROWS_ONLY', meta.options['caching'])
+        assert 'ROWS_ONLY' == meta.options['caching']
 
-    assert_equal(2, len(meta.partition_key))
-    assert_equal(meta.partition_key[0].name, 'a')
-    assert_equal(meta.partition_key[1].name, 'b')
+    assert 2 == len(meta.partition_key)
+    assert meta.partition_key[0].name == 'a'
+    assert meta.partition_key[1].name == 'b'
 
-    assert_equal(1, len(meta.clustering_key))
-    assert_equal(meta.clustering_key[0].name, 'c')
+    assert 1 == len(meta.clustering_key)
+    assert meta.clustering_key[0].name == 'c'
 
 
 def establish_uda(version, session, table_name_prefix=""):
@@ -281,13 +283,13 @@ def verify_uda(created_on_version, current_version, keyspace, session, table_nam
     function_name = _cql_name_builder(table_name_prefix, "test_uda_function")
     aggregate_name = _cql_name_builder(table_name_prefix, "test_uda_aggregate")
 
-    assert_in(function_name + "(int,int)", session.cluster.metadata.keyspaces[keyspace].functions.keys())
-    assert_in(aggregate_name + "(int)", session.cluster.metadata.keyspaces[keyspace].aggregates.keys())
+    assert function_name + "(int,int)" in list(session.cluster.metadata.keyspaces[keyspace].functions.keys())
+    assert aggregate_name + "(int)" in list(session.cluster.metadata.keyspaces[keyspace].aggregates.keys())
 
     aggr_meta = session.cluster.metadata.keyspaces[keyspace].aggregates[aggregate_name + "(int)"]
-    assert_equal(function_name, aggr_meta.state_func)
-    assert_equal('int', aggr_meta.state_type)
-    assert_equal('int', aggr_meta.return_type)
+    assert function_name == aggr_meta.state_func
+    assert 'int' == aggr_meta.state_type
+    assert 'int' == aggr_meta.return_type
 
 
 def establish_udf(version, session, table_name_prefix=""):
@@ -303,13 +305,13 @@ def verify_udf(created_on_version, current_version, keyspace, session, table_nam
     if created_on_version < '2.2':
         return
     function_name = _cql_name_builder(table_name_prefix, "test_udf")
-    assert_in(function_name + "(double)", session.cluster.metadata.keyspaces[keyspace].functions.keys())
+    assert function_name + "(double)" in list(session.cluster.metadata.keyspaces[keyspace].functions.keys())
     meta = session.cluster.metadata.keyspaces[keyspace].functions[function_name + "(double)"]
-    assert_equal('java', meta.language)
-    assert_equal('double', meta.return_type)
-    assert_equal(['double'], meta.argument_types)
-    assert_equal(['input'], meta.argument_names)
-    assert_equal('return Double.valueOf(Math.log(input.doubleValue()));', meta.body)
+    assert 'java' == meta.language
+    assert 'double' == meta.return_type
+    assert ['double'] == meta.argument_types
+    assert ['input'] == meta.argument_names
+    assert 'return Double.valueOf(Math.log(input.doubleValue()));' == meta.body
 
 
 def establish_udt_table(version, session, table_name_prefix=""):
@@ -330,13 +332,13 @@ def verify_udt_table(created_on_version, current_version, keyspace, session, tab
     table_name = _cql_name_builder(table_name_prefix, "test_udt")
     meta = session.cluster.metadata.keyspaces[keyspace].user_types[table_name]
 
-    assert_equal(meta.field_names, ['street', 'city', 'zip'])
-    assert_equal('street', meta.field_names[0])
-    assert_equal('text', meta.field_types[0])
-    assert_equal('city', meta.field_names[1])
-    assert_equal('text', meta.field_types[1])
-    assert_equal('zip', meta.field_names[2])
-    assert_equal('int', meta.field_types[2])
+    assert meta.field_names == ['street', 'city', 'zip']
+    assert 'street' == meta.field_names[0]
+    assert 'text' == meta.field_types[0]
+    assert 'city' == meta.field_names[1]
+    assert 'text' == meta.field_types[1]
+    assert 'zip' == meta.field_names[2]
+    assert 'int' == meta.field_types[2]
 
 
 def establish_static_column_table(version, session, table_name_prefix=""):
@@ -358,15 +360,15 @@ def verify_static_column_table(created_on_version, current_version, keyspace, se
         return
     table_name = _cql_name_builder(table_name_prefix, "test_static_column")
     meta = session.cluster.metadata.keyspaces[keyspace].tables[table_name]
-    assert_equal(4, len(meta.columns))
-    assert_equal('text', meta.columns['user'].cql_type)
-    assert_equal(False, meta.columns['user'].is_static)
-    assert_equal('int', meta.columns['balance'].cql_type)
-    assert_equal(True, meta.columns['balance'].is_static)
-    assert_equal('int', meta.columns['expense_id'].cql_type)
-    assert_equal(False, meta.columns['expense_id'].is_static)
-    assert_equal('int', meta.columns['amount'].cql_type)
-    assert_equal(False, meta.columns['amount'].is_static)
+    assert 4 == len(meta.columns)
+    assert 'text' == meta.columns['user'].cql_type
+    assert not meta.columns['user'].is_static
+    assert 'int' == meta.columns['balance'].cql_type
+    assert meta.columns['balance'].is_static
+    assert 'int' == meta.columns['expense_id'].cql_type
+    assert not meta.columns['expense_id'].is_static
+    assert 'int' == meta.columns['amount'].cql_type
+    assert not meta.columns['amount'].is_static
 
 
 def establish_collection_datatype_table(version, session, table_name_prefix=""):
@@ -399,24 +401,24 @@ def verify_collection_datatype_table(created_on_version, current_version, keyspa
     table_name = _cql_name_builder(table_name_prefix, "test_collection_datatypes")
     meta = session.cluster.metadata.keyspaces[keyspace].tables[table_name]
     if created_on_version > '2.1':
-        assert_equal(13, len(meta.columns))
+        assert 13 == len(meta.columns)
     else:
-        assert_equal(7, len(meta.columns))
+        assert 7 == len(meta.columns)
 
-    assert_equal('list<int>', meta.columns['a'].cql_type)
-    assert_equal('list<text>', meta.columns['b'].cql_type)
-    assert_equal('set<int>', meta.columns['c'].cql_type)
-    assert_equal('set<text>', meta.columns['d'].cql_type)
-    assert_equal('map<text, text>', meta.columns['e'].cql_type)
-    assert_equal('map<text, int>', meta.columns['f'].cql_type)
+    assert 'list<int>' == meta.columns['a'].cql_type
+    assert 'list<text>' == meta.columns['b'].cql_type
+    assert 'set<int>' == meta.columns['c'].cql_type
+    assert 'set<text>' == meta.columns['d'].cql_type
+    assert 'map<text, text>' == meta.columns['e'].cql_type
+    assert 'map<text, int>' == meta.columns['f'].cql_type
 
     if created_on_version > '2.1':
-        assert_equal('frozen<list<int>>', meta.columns['g'].cql_type)
-        assert_equal('frozen<list<text>>', meta.columns['h'].cql_type)
-        assert_equal('frozen<set<int>>', meta.columns['i'].cql_type)
-        assert_equal('frozen<set<text>>', meta.columns['j'].cql_type)
-        assert_equal('frozen<map<text, text>>', meta.columns['k'].cql_type)
-        assert_equal('frozen<map<text, int>>', meta.columns['l'].cql_type)
+        assert 'frozen<list<int>>' == meta.columns['g'].cql_type
+        assert 'frozen<list<text>>' == meta.columns['h'].cql_type
+        assert 'frozen<set<int>>' == meta.columns['i'].cql_type
+        assert 'frozen<set<text>>' == meta.columns['j'].cql_type
+        assert 'frozen<map<text, text>>' == meta.columns['k'].cql_type
+        assert 'frozen<map<text, int>>' == meta.columns['l'].cql_type
 
 
 def establish_basic_datatype_table(version, session, table_name_prefix=""):
@@ -453,33 +455,33 @@ def verify_basic_datatype_table(created_on_version, current_version, keyspace, s
     table_name = _cql_name_builder(table_name_prefix, "test_basic_datatypes")
     meta = session.cluster.metadata.keyspaces[keyspace].tables[table_name]
     if created_on_version > '2.2':
-        assert_equal(19, len(meta.columns))
+        assert 19 == len(meta.columns)
     else:
-        assert_equal(15, len(meta.columns))
-
-    assert_equal(1, len(meta.primary_key))
-    assert_equal('b', meta.primary_key[0].name)
-
-    assert_equal('ascii', meta.columns['a'].cql_type)
-    assert_equal('bigint', meta.columns['b'].cql_type)
-    assert_equal('blob', meta.columns['c'].cql_type)
-    assert_equal('boolean', meta.columns['d'].cql_type)
-    assert_equal('decimal', meta.columns['e'].cql_type)
-    assert_equal('double', meta.columns['f'].cql_type)
-    assert_equal('float', meta.columns['g'].cql_type)
-    assert_equal('inet', meta.columns['h'].cql_type)
-    assert_equal('int', meta.columns['i'].cql_type)
-    assert_equal('text', meta.columns['j'].cql_type)
-    assert_equal('timestamp', meta.columns['k'].cql_type)
-    assert_equal('timeuuid', meta.columns['l'].cql_type)
-    assert_equal('uuid', meta.columns['m'].cql_type)
-    assert_equal('text', meta.columns['n'].cql_type)
-    assert_equal('varint', meta.columns['o'].cql_type)
+        assert 15 == len(meta.columns)
+
+    assert 1 == len(meta.primary_key)
+    assert 'b' == meta.primary_key[0].name
+
+    assert 'ascii' == meta.columns['a'].cql_type
+    assert 'bigint' == meta.columns['b'].cql_type
+    assert 'blob' == meta.columns['c'].cql_type
+    assert 'boolean' == meta.columns['d'].cql_type
+    assert 'decimal' == meta.columns['e'].cql_type
+    assert 'double' == meta.columns['f'].cql_type
+    assert 'float' == meta.columns['g'].cql_type
+    assert 'inet' == meta.columns['h'].cql_type
+    assert 'int' == meta.columns['i'].cql_type
+    assert 'text' == meta.columns['j'].cql_type
+    assert 'timestamp' == meta.columns['k'].cql_type
+    assert 'timeuuid' == meta.columns['l'].cql_type
+    assert 'uuid' == meta.columns['m'].cql_type
+    assert 'text' == meta.columns['n'].cql_type
+    assert 'varint' == meta.columns['o'].cql_type
     if created_on_version > '2.2':
-        assert_equal('date', meta.columns['p'].cql_type)
-        assert_equal('smallint', meta.columns['q'].cql_type)
-        assert_equal('time', meta.columns['r'].cql_type)
-        assert_equal('tinyint', meta.columns['s'].cql_type)
+        assert 'date' == meta.columns['p'].cql_type
+        assert 'smallint' == meta.columns['q'].cql_type
+        assert 'time' == meta.columns['r'].cql_type
+        assert 'tinyint' == meta.columns['s'].cql_type
 
 
 def _cql_name_builder(prefix, table_name):
@@ -495,10 +497,9 @@ def _cql_name_builder(prefix, table_name):
 
 
 class TestSchemaMetadata(Tester):
-
-    def setUp(self):
-        Tester.setUp(self)
-        cluster = self.cluster
+    @pytest.fixture(scope='function', autouse=True)
+    def fixture_set_cluster_settings(self, fixture_dtest_setup):
+        cluster = fixture_dtest_setup.cluster
         cluster.schema_event_refresh_window = 0
 
         if cluster.version() >= '3.0':
@@ -508,101 +509,101 @@ class TestSchemaMetadata(Tester):
             cluster.set_configuration_options({'enable_user_defined_functions': 'true'})
         cluster.populate(1).start()
 
-        self.session = self.patient_cql_connection(cluster.nodelist()[0])
+        self.session = fixture_dtest_setup.patient_cql_connection(cluster.nodelist()[0])
         create_ks(self.session, 'ks', 1)
 
     def _keyspace_meta(self, keyspace_name="ks"):
         self.session.cluster.refresh_schema_metadata()
         return self.session.cluster.metadata.keyspaces[keyspace_name]
 
-    def creating_and_dropping_keyspace_test(self):
+    def test_creating_and_dropping_keyspace(self):
         starting_keyspace_count = len(self.session.cluster.metadata.keyspaces)
-        self.assertEqual(True, self._keyspace_meta().durable_writes)
+        assert self._keyspace_meta().durable_writes
         self.session.execute("""
                 CREATE KEYSPACE so_long
                     WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}
                     AND durable_writes = false
             """)
-        self.assertEqual(False, self._keyspace_meta('so_long').durable_writes)
+        assert False == self._keyspace_meta('so_long').durable_writes
         self.session.execute("DROP KEYSPACE so_long")
-        self.assertEqual(starting_keyspace_count, len(self.session.cluster.metadata.keyspaces))
+        assert starting_keyspace_count == len(self.session.cluster.metadata.keyspaces)
 
-    def creating_and_dropping_table_test(self):
+    def test_creating_and_dropping_table(self):
         self.session.execute("create table born_to_die (id uuid primary key, name varchar)")
         meta = self._keyspace_meta().tables['born_to_die']
-        self.assertEqual('ks', meta.keyspace_name)
-        self.assertEqual('born_to_die', meta.name)
-        self.assertEqual(1, len(meta.partition_key))
-        self.assertEqual('id', meta.partition_key[0].name)
-        self.assertEqual(2, len(meta.columns))
-        self.assertIsNotNone(meta.columns.get('id'))
-        self.assertEqual('uuid', meta.columns['id'].cql_type)
-        self.assertIsNotNone(meta.columns.get('name'))
-        self.assertEqual('text', meta.columns['name'].cql_type)
-        self.assertEqual(0, len(meta.clustering_key))
-        self.assertEqual(0, len(meta.triggers))
-        self.assertEqual(0, len(meta.indexes))
+        assert 'ks' == meta.keyspace_name
+        assert 'born_to_die' == meta.name
+        assert 1 == len(meta.partition_key)
+        assert 'id' == meta.partition_key[0].name
+        assert 2 == len(meta.columns)
+        assert meta.columns.get('id') is not None
+        assert 'uuid' == meta.columns['id'].cql_type
+        assert meta.columns.get('name') is not None
+        assert 'text' == meta.columns['name'].cql_type
+        assert 0 == len(meta.clustering_key)
+        assert 0 == len(meta.triggers)
+        assert 0 == len(meta.indexes)
         self.session.execute("drop table born_to_die")
-        self.assertIsNone(self._keyspace_meta().tables.get('born_to_die'))
+        assert self._keyspace_meta().tables.get('born_to_die') is None
 
-    def creating_and_dropping_table_with_2ary_indexes_test(self):
-        self.assertEqual(0, len(self._keyspace_meta().indexes))
+    def test_creating_and_dropping_table_with_2ary_indexes(self):
+        assert 0 == len(self._keyspace_meta().indexes)
         self.session.execute("create table born_to_die (id uuid primary key, name varchar)")
         self.session.execute("create index ix_born_to_die_name on born_to_die(name)")
 
-        self.assertEqual(1, len(self._keyspace_meta().indexes))
+        assert 1 == len(self._keyspace_meta().indexes)
         ix_meta = self._keyspace_meta().indexes['ix_born_to_die_name']
-        self.assertEqual('ix_born_to_die_name', ix_meta.name)
+        assert 'ix_born_to_die_name' == ix_meta.name
 
-        self.assertEqual({'target': 'name'}, ix_meta.index_options)
-        self.assertEqual('COMPOSITES', ix_meta.kind)
+        assert {'target': 'name'} == ix_meta.index_options
+        assert 'COMPOSITES' == ix_meta.kind
 
         self.session.execute("drop table born_to_die")
-        self.assertIsNone(self._keyspace_meta().tables.get('born_to_die'))
-        self.assertIsNone(self._keyspace_meta().indexes.get('ix_born_to_die_name'))
-        self.assertEqual(0, len(self._keyspace_meta().indexes))
+        assert self._keyspace_meta().tables.get('born_to_die') is None
+        assert self._keyspace_meta().indexes.get('ix_born_to_die_name') is None
+        assert 0 == len(self._keyspace_meta().indexes)
 
     @since('2.1')
-    def creating_and_dropping_user_types_test(self):
-        self.assertEqual(0, len(self._keyspace_meta().user_types))
+    def test_creating_and_dropping_user_types(self):
+        assert 0 == len(self._keyspace_meta().user_types)
         self.session.execute("CREATE TYPE soon_to_die (foo text, bar int)")
-        self.assertEqual(1, len(self._keyspace_meta().user_types))
+        assert 1 == len(self._keyspace_meta().user_types)
 
         ut_meta = self._keyspace_meta().user_types['soon_to_die']
-        self.assertEqual('ks', ut_meta.keyspace)
-        self.assertEqual('soon_to_die', ut_meta.name)
-        self.assertEqual(['foo', 'bar'], ut_meta.field_names)
-        self.assertEqual(['text', 'int'], ut_meta.field_types)
+        assert 'ks' == ut_meta.keyspace
+        assert 'soon_to_die' == ut_meta.name
+        assert ['foo', 'bar'] == ut_meta.field_names
+        assert ['text', 'int'] == ut_meta.field_types
 
         self.session.execute("DROP TYPE soon_to_die")
-        self.assertEqual(0, len(self._keyspace_meta().user_types))
+        assert 0 == len(self._keyspace_meta().user_types)
 
     @since('2.2')
-    def creating_and_dropping_udf_test(self):
-        self.assertEqual(0, len(self._keyspace_meta().functions), "expected to start with no indexes")
+    def test_creating_and_dropping_udf(self):
+        assert 0 == len(self._keyspace_meta().functions), "expected to start with no indexes"
         self.session.execute("""
                 CREATE OR REPLACE FUNCTION ks.wasteful_function (input double)
                     CALLED ON NULL INPUT
                     RETURNS double
                     LANGUAGE java AS 'return Double.valueOf(Math.log(input.doubleValue()));';
             """)
-        self.assertEqual(1, len(self._keyspace_meta().functions), "udf count should be 1")
+        assert 1 == len(self._keyspace_meta().functions), "udf count should be 1"
         udf_meta = self._keyspace_meta().functions['wasteful_function(double)']
-        self.assertEqual('ks', udf_meta.keyspace)
-        self.assertEqual('wasteful_function', udf_meta.name)
-        self.assertEqual(['double'], udf_meta.argument_types)
-        self.assertEqual(['input'], udf_meta.argument_names)
-        self.assertEqual('double', udf_meta.return_type)
-        self.assertEqual('java', udf_meta.language)
-        self.assertEqual('return Double.valueOf(Math.log(input.doubleValue()));', udf_meta.body)
-        self.assertTrue(udf_meta.called_on_null_input)
+        assert 'ks' == udf_meta.keyspace
+        assert 'wasteful_function' == udf_meta.name
+        assert ['double'] == udf_meta.argument_types
+        assert ['input'] == udf_meta.argument_names
+        assert 'double' == udf_meta.return_type
+        assert 'java' == udf_meta.language
+        assert 'return Double.valueOf(Math.log(input.doubleValue()));' == udf_meta.body
+        assert udf_meta.called_on_null_input
         self.session.execute("DROP FUNCTION ks.wasteful_function")
-        self.assertEqual(0, len(self._keyspace_meta().functions), "expected udf list to be back to zero")
+        assert 0 == len(self._keyspace_meta().functions), "expected udf list to be back to zero"
 
     @since('2.2')
-    def creating_and_dropping_uda_test(self):
-        self.assertEqual(0, len(self._keyspace_meta().functions), "expected to start with no indexes")
-        self.assertEqual(0, len(self._keyspace_meta().aggregates), "expected to start with no aggregates")
+    def test_creating_and_dropping_uda(self):
+        assert 0 == len(self._keyspace_meta().functions), "expected to start with no indexes"
+        assert 0 == len(self._keyspace_meta().aggregates), "expected to start with no aggregates"
         self.session.execute('''
                 CREATE FUNCTION ks.max_val(current int, candidate int)
                 CALLED ON NULL INPUT
@@ -615,86 +616,86 @@ class TestSchemaMetadata(Tester):
                 STYPE int
                 INITCOND -1
             ''')
-        self.assertEqual(1, len(self._keyspace_meta().functions), "udf count should be 1")
-        self.assertEqual(1, len(self._keyspace_meta().aggregates), "uda count should be 1")
+        assert 1 == len(self._keyspace_meta().functions), "udf count should be 1"
+        assert 1 == len(self._keyspace_meta().aggregates), "uda count should be 1"
         udf_meta = self._keyspace_meta().functions['max_val(int,int)']
         uda_meta = self._keyspace_meta().aggregates['kind_of_max_agg(int)']
 
-        self.assertEqual('ks', udf_meta.keyspace)
-        self.assertEqual('max_val', udf_meta.name)
-        self.assertEqual(['int', 'int'], udf_meta.argument_types)
-        self.assertEqual(['current', 'candidate'], udf_meta.argument_names)
-        self.assertEqual('int', udf_meta.return_type)
-        self.assertEqual('java', udf_meta.language)
-        self.assertEqual('if (current == null) return candidate; else return Math.max(current, candidate);', udf_meta.body)
-        self.assertTrue(udf_meta.called_on_null_input)
-
-        self.assertEqual('ks', uda_meta.keyspace)
-        self.assertEqual('kind_of_max_agg', uda_meta.name)
-        self.assertEqual(['int'], uda_meta.argument_types)
-        self.assertEqual('max_val', uda_meta.state_func)
-        self.assertEqual('int', uda_meta.state_type)
-        self.assertEqual(None, uda_meta.final_func)
-        self.assertEqual('-1', uda_meta.initial_condition)
-        self.assertEqual('int', uda_meta.return_type)
+        assert 'ks' == udf_meta.keyspace
+        assert 'max_val' == udf_meta.name
+        assert ['int', 'int'] == udf_meta.argument_types
+        assert ['current', 'candidate'] == udf_meta.argument_names
+        assert 'int' == udf_meta.return_type
+        assert 'java' == udf_meta.language
+        assert 'if (current == null) return candidate; else return Math.max(current, candidate);' == udf_meta.body
+        assert udf_meta.called_on_null_input
+
+        assert 'ks' == uda_meta.keyspace
+        assert 'kind_of_max_agg' == uda_meta.name
+        assert ['int'] == uda_meta.argument_types
+        assert 'max_val' == uda_meta.state_func
+        assert 'int' == uda_meta.state_type
+        assert uda_meta.final_func is None
+        assert '-1' == uda_meta.initial_condition
+        assert 'int' == uda_meta.return_type
 
         self.session.execute("DROP AGGREGATE ks.kind_of_max_agg")
-        self.assertEqual(0, len(self._keyspace_meta().aggregates), "expected uda list to be back to zero")
+        assert 0 == len(self._keyspace_meta().aggregates), "expected uda list to be back to zero"
         self.session.execute("DROP FUNCTION ks.max_val")
-        self.assertEqual(0, len(self._keyspace_meta().functions), "expected udf list to be back to zero")
+        assert 0 == len(self._keyspace_meta().functions), "expected udf list to be back to zero"
 
-    def basic_table_datatype_test(self):
+    def test_basic_table_datatype(self):
         establish_basic_datatype_table(self.cluster.version(), self.session)
         verify_basic_datatype_table(self.cluster.version(), self.cluster.version(), 'ks', self.session)
 
-    def collection_table_datatype_test(self):
+    def test_collection_table_datatype(self):
         establish_collection_datatype_table(self.cluster.version(), self.session)
         verify_collection_datatype_table(self.cluster.version(), self.cluster.version(), 'ks', self.session)
 
-    def clustering_order_test(self):
+    def test_clustering_order(self):
         establish_clustering_order_table(self.cluster.version(), self.session)
         verify_clustering_order_table(self.cluster.version(), self.cluster.version(), 'ks', self.session)
 
     @since("2.0", max_version="3.X")  # Compact Storage
-    def compact_storage_test(self):
+    def test_compact_storage(self):
         establish_compact_storage_table(self.cluster.version(), self.session)
         verify_compact_storage_table(self.cluster.version(), self.cluster.version(), 'ks', self.session)
 
     @since("2.0", max_version="3.X")  # Compact Storage
-    def compact_storage_composite_test(self):
+    def test_compact_storage_composite(self):
         establish_compact_storage_composite_table(self.cluster.version(), self.session)
         verify_compact_storage_composite_table(self.cluster.version(), self.cluster.version(), 'ks', self.session)
 
-    def nondefault_table_settings_test(self):
+    def test_nondefault_table_settings(self):
         establish_nondefault_table_settings(self.cluster.version(), self.session)
         verify_nondefault_table_settings(self.cluster.version(), self.cluster.version(), 'ks', self.session)
 
-    def indexes_test(self):
+    def test_indexes(self):
         establish_indexes_table(self.cluster.version(), self.session)
         verify_indexes_table(self.cluster.version(), self.cluster.version(), 'ks', self.session)
 
-    def durable_writes_test(self):
+    def test_durable_writes(self):
         establish_durable_writes_keyspace(self.cluster.version(), self.session)
         verify_durable_writes_keyspace(self.cluster.version(), self.cluster.version(), 'ks', self.session)
 
     @since('2.0')
-    def static_column_test(self):
+    def test_static_column(self):
         establish_static_column_table(self.cluster.version(), self.session)
         verify_static_column_table(self.cluster.version(), self.cluster.version(), 'ks', self.session)
 
     @since('2.1')
-    def udt_table_test(self):
+    def test_udt_table(self):
         establish_udt_table(self.cluster.version(), self.session)
         verify_udt_table(self.cluster.version(), self.cluster.version(), 'ks', self.session)
 
     @since('2.2')
-    def udf_test(self):
+    def test_udf(self):
         establish_udf(self.cluster.version(), self.session)
         self.session.cluster.refresh_schema_metadata()
         verify_udf(self.cluster.version(), self.cluster.version(), 'ks', self.session)
 
     @since('2.2')
-    def uda_test(self):
+    def test_uda(self):
         establish_uda(self.cluster.version(), self.session)
         self.session.cluster.refresh_schema_metadata()
         verify_uda(self.cluster.version(), self.cluster.version(), 'ks', self.session)
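
The setUp-to-fixture conversion above follows the pattern used throughout this
migration: per-test initialization moves into an autouse pytest fixture that
receives the shared fixture_dtest_setup fixture. A minimal sketch of the shape,
with a hypothetical test class (TestExample and fixture_setup are illustrative
names, not from this commit):

    import pytest

    class TestExample:
        @pytest.fixture(scope='function', autouse=True)
        def fixture_setup(self, fixture_dtest_setup):
            # autouse=True makes this run before every test in the class,
            # playing the role unittest's setUp() used to play
            self.cluster = fixture_dtest_setup.cluster

        def test_cluster_is_available(self):
            assert self.cluster is not None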

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/schema_test.py
----------------------------------------------------------------------
diff --git a/schema_test.py b/schema_test.py
index 1553adc..8aaea3d 100644
--- a/schema_test.py
+++ b/schema_test.py
@@ -1,15 +1,19 @@
 import time
+import pytest
+import logging
 
 from cassandra.concurrent import execute_concurrent_with_args
 
 from tools.assertions import assert_invalid, assert_all, assert_one
-from tools.decorators import since
 from dtest import Tester, create_ks
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
 
 class TestSchema(Tester):
 
-    def table_alteration_test(self):
+    def test_table_alteration(self):
         """
         Tests that table alters return as expected with many sstables at different schema points
         """
@@ -42,16 +46,16 @@ class TestSchema(Tester):
         rows = session.execute("select * from tbl_o_churn")
         for row in rows:
             if row.id < rows_to_insert * 5:
-                self.assertEqual(row.c1, 'bbb')
-                self.assertIsNone(row.c2)
-                self.assertFalse(hasattr(row, 'c0'))
+                assert row.c1 == 'bbb'
+                assert row.c2 is None
+                assert not hasattr(row, 'c0')
             else:
-                self.assertEqual(row.c1, 'ccc')
-                self.assertEqual(row.c2, 'ddd')
-                self.assertFalse(hasattr(row, 'c0'))
+                assert row.c1 == 'ccc'
+                assert row.c2 == 'ddd'
+                assert not hasattr(row, 'c0')
 
     @since("2.0", max_version="3.X")  # Compact Storage
-    def drop_column_compact_test(self):
+    def test_drop_column_compact(self):
         session = self.prepare()
 
         session.execute("USE ks")
@@ -59,7 +63,7 @@ class TestSchema(Tester):
 
         assert_invalid(session, "ALTER TABLE cf DROP c1", "Cannot drop columns from a")
 
-    def drop_column_compaction_test(self):
+    def test_drop_column_compaction(self):
         session = self.prepare()
         session.execute("USE ks")
         session.execute("CREATE TABLE cf (key int PRIMARY KEY, c1 int, c2 int)")
@@ -84,7 +88,7 @@ class TestSchema(Tester):
         session = self.patient_cql_connection(node)
         assert_all(session, "SELECT c1 FROM ks.cf", [[None], [None], [None], [4]], ignore_order=True)
 
-    def drop_column_queries_test(self):
+    def test_drop_column_queries(self):
         session = self.prepare()
 
         session.execute("USE ks")
@@ -116,7 +120,7 @@ class TestSchema(Tester):
 
         assert_one(session, "SELECT * FROM cf WHERE c2 = 5", [3, 4, 5])
 
-    def drop_column_and_restart_test(self):
+    def test_drop_column_and_restart(self):
         """
         Simply insert data in a table, drop a column involved in the insert and restart the node afterwards.
         This ensures that the dropped_columns system table is properly flushed on the alter or the restart
@@ -142,7 +146,7 @@ class TestSchema(Tester):
         session.execute("USE ks")
         assert_one(session, "SELECT * FROM t", [0, 0])
 
-    def drop_static_column_and_restart_test(self):
+    def test_drop_static_column_and_restart(self):
         """
         Dropping a static column caused an sstable corruption exception after restarting; here
         we test that we can drop a static column and restart safely.

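The wholesale rename from *_test methods to test_* methods in this file is not
cosmetic: nose's default collector matched names containing "test" at a word
boundary, so foo_test was picked up, while pytest's defaults only collect test_*
functions inside Test* classes. The pytest.ini snippet below only illustrates
the relevant knobs (these are pytest's documented option names, not necessarily
this repository's configuration):

    [pytest]
    python_files = *_test.py
    python_classes = Test*
    python_functions = test_*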

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/upgrade_tests/paging_test.py
----------------------------------------------------------------------
diff --git a/upgrade_tests/paging_test.py b/upgrade_tests/paging_test.py
index 1398199..e21ba88 100644
--- a/upgrade_tests/paging_test.py
+++ b/upgrade_tests/paging_test.py
@@ -1,38 +1,41 @@
 import itertools
 import time
 import uuid
-from unittest import SkipTest, skipUnless
+import pytest
+import logging
 
 from cassandra import ConsistencyLevel as CL
 from cassandra import InvalidRequest
 from cassandra.query import SimpleStatement, dict_factory, named_tuple_factory
 from ccmlib.common import LogPatternToVersion
-from nose.tools import assert_not_in
 
-from dtest import RUN_STATIC_UPGRADE_MATRIX, debug, run_scenarios
+from dtest import RUN_STATIC_UPGRADE_MATRIX, run_scenarios
 from tools.assertions import assert_read_timeout_or_failure
 from tools.data import rows_to_list
 from tools.datahelp import create_rows, flatten_into_set, parse_data_into_dicts
-from tools.decorators import since
 from tools.paging import PageAssertionMixin, PageFetcher
-from upgrade_base import UpgradeTester
-from upgrade_manifest import build_upgrade_pairs
+from .upgrade_base import UpgradeTester
+from .upgrade_manifest import build_upgrade_pairs
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 class BasePagingTester(UpgradeTester):
 
     def prepare(self, *args, **kwargs):
         start_on, upgrade_to = self.UPGRADE_PATH.starting_meta, self.UPGRADE_PATH.upgrade_meta
-        if 'protocol_version' not in kwargs.keys():
+        if 'protocol_version' not in list(kwargs.keys()):
             # Due to CASSANDRA-10880, we need to use proto v3 (instead of v4) when it's a mixed cluster of 2.2.x and 3.0.x nodes.
             if start_on.family in ('2.1.x', '2.2.x') and upgrade_to.family == '3.0.x':
-                debug("Protocol version set to v3, due to 2.1.x/2.2.x and 3.0.x mixed version cluster.")
+                logger.debug("Protocol version set to v3, due to 2.1.x/2.2.x and 3.0.x mixed version cluster.")
                 kwargs['protocol_version'] = 3
 
         cursor = UpgradeTester.prepare(self, *args, row_factory=kwargs.pop('row_factory', dict_factory), **kwargs)
         return cursor
 
 
+@pytest.mark.upgrade_test
 class TestPagingSize(BasePagingTester, PageAssertionMixin):
     """
     Basic tests relating to page size (relative to results set)
@@ -47,7 +50,7 @@ class TestPagingSize(BasePagingTester, PageAssertionMixin):
         cursor.execute("CREATE TABLE paging_test ( id int PRIMARY KEY, value text )")
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
 
             # run a query that has no results and make sure it's exhausted
             future = cursor.execute_async(
@@ -56,15 +59,15 @@ class TestPagingSize(BasePagingTester, PageAssertionMixin):
 
             pf = PageFetcher(future)
             pf.request_all()
-            self.assertEqual([], pf.all_data())
-            self.assertFalse(pf.has_more_pages)
+            assert [] == pf.all_data()
+            assert not pf.has_more_pages
 
     def test_with_less_results_than_page_size(self):
         cursor = self.prepare()
         cursor.execute("CREATE TABLE paging_test ( id int PRIMARY KEY, value text )")
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
 
             data = """
@@ -75,7 +78,7 @@ class TestPagingSize(BasePagingTester, PageAssertionMixin):
                 |4 |and more testing|
                 |5 |and more testing|
                 """
-            expected_data = create_rows(data, cursor, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': unicode})
+            expected_data = create_rows(data, cursor, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': str})
 
             future = cursor.execute_async(
                 SimpleStatement("select * from paging_test", fetch_size=100, consistency_level=CL.ALL)
@@ -83,15 +86,15 @@ class TestPagingSize(BasePagingTester, PageAssertionMixin):
             pf = PageFetcher(future)
             pf.request_all()
 
-            self.assertFalse(pf.has_more_pages)
-            self.assertEqual(len(expected_data), len(pf.all_data()))
+            assert not pf.has_more_pages
+            assert len(expected_data) == len(pf.all_data())
 
     def test_with_more_results_than_page_size(self):
         cursor = self.prepare()
         cursor.execute("CREATE TABLE paging_test ( id int PRIMARY KEY, value text )")
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
 
             data = """
@@ -106,7 +109,7 @@ class TestPagingSize(BasePagingTester, PageAssertionMixin):
                 |8 |and more testing|
                 |9 |and more testing|
                 """
-            expected_data = create_rows(data, cursor, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': unicode})
+            expected_data = create_rows(data, cursor, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': str})
 
             future = cursor.execute_async(
                 SimpleStatement("select * from paging_test", fetch_size=5, consistency_level=CL.ALL)
@@ -114,8 +117,8 @@ class TestPagingSize(BasePagingTester, PageAssertionMixin):
 
             pf = PageFetcher(future).request_all()
 
-            self.assertEqual(pf.pagecount(), 2)
-            self.assertEqual(pf.num_results_all(), [5, 4])
+            assert pf.pagecount() == 2
+            assert pf.num_results_all() == [5, 4]
 
             # make sure expected and actual have same data elements (ignoring order)
             self.assertEqualIgnoreOrder(pf.all_data(), expected_data)
@@ -125,7 +128,7 @@ class TestPagingSize(BasePagingTester, PageAssertionMixin):
         cursor.execute("CREATE TABLE paging_test ( id int PRIMARY KEY, value text )")
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
 
             data = """
@@ -136,7 +139,7 @@ class TestPagingSize(BasePagingTester, PageAssertionMixin):
                 |4 |and more testing|
                 |5 |and more testing|
                 """
-            expected_data = create_rows(data, cursor, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': unicode})
+            expected_data = create_rows(data, cursor, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': str})
 
             future = cursor.execute_async(
                 SimpleStatement("select * from paging_test", fetch_size=5, consistency_level=CL.ALL)
@@ -144,8 +147,8 @@ class TestPagingSize(BasePagingTester, PageAssertionMixin):
 
             pf = PageFetcher(future).request_all()
 
-            self.assertEqual(pf.num_results_all(), [5])
-            self.assertEqual(pf.pagecount(), 1)
+            assert pf.num_results_all() == [5]
+            assert pf.pagecount() == 1
 
             # make sure expected and actual have same data elements (ignoring order)
             self.assertEqualIgnoreOrder(pf.all_data(), expected_data)
@@ -161,14 +164,14 @@ class TestPagingSize(BasePagingTester, PageAssertionMixin):
             return uuid.uuid4()
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
 
             data = """
                    | id     |value   |
               *5001| [uuid] |testing |
                 """
-            expected_data = create_rows(data, cursor, 'paging_test', cl=CL.ALL, format_funcs={'id': random_txt, 'value': unicode})
+            expected_data = create_rows(data, cursor, 'paging_test', cl=CL.ALL, format_funcs={'id': random_txt, 'value': str})
 
             future = cursor.execute_async(
                 SimpleStatement("select * from paging_test", consistency_level=CL.ALL)
@@ -176,7 +179,7 @@ class TestPagingSize(BasePagingTester, PageAssertionMixin):
 
             pf = PageFetcher(future).request_all()
 
-            self.assertEqual(pf.num_results_all(), [5000, 1])
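+            # 5001 rows at the driver's default fetch size should come back as one 5000-row page plus one final row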
+            assert pf.num_results_all() == [5000, 1]
 
             self.maxDiff = None
             # make sure expected and actual have same data elements (ignoring order)
@@ -204,7 +207,7 @@ class TestPagingWithModifiers(BasePagingTester, PageAssertionMixin):
             """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
 
             data = """
@@ -221,7 +224,7 @@ class TestPagingWithModifiers(BasePagingTester, PageAssertionMixin):
                 |1 |j    |
                 """
 
-            expected_data = create_rows(data, cursor, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': unicode})
+            expected_data = create_rows(data, cursor, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': str})
 
             future = cursor.execute_async(
                 SimpleStatement("select * from paging_test where id = 1 order by value asc", fetch_size=5, consistency_level=CL.ALL)
@@ -229,14 +232,14 @@ class TestPagingWithModifiers(BasePagingTester, PageAssertionMixin):
 
             pf = PageFetcher(future).request_all()
 
-            self.assertEqual(pf.pagecount(), 2)
-            self.assertEqual(pf.num_results_all(), [5, 5])
+            assert pf.pagecount() == 2
+            assert pf.num_results_all() == [5, 5]
 
             # these should be equal (in the same order)
-            self.assertEqual(pf.all_data(), expected_data)
+            assert pf.all_data() == expected_data
 
             # make sure we don't allow paging over multiple partitions with order because that's weird
-            with self.assertRaisesRegexp(InvalidRequest, 'Cannot page queries with both ORDER BY and a IN restriction on the partition key'):
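+            # the match argument of pytest.raises is applied as a regex search against the rendered exception message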
+            with pytest.raises(InvalidRequest, match='Cannot page queries with both ORDER BY and a IN restriction on the partition key'):
                 stmt = SimpleStatement("select * from paging_test where id in (1,2) order by value asc", consistency_level=CL.ALL)
                 cursor.execute(stmt)
 
@@ -257,7 +260,7 @@ class TestPagingWithModifiers(BasePagingTester, PageAssertionMixin):
             """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
 
             data = """
@@ -274,7 +277,7 @@ class TestPagingWithModifiers(BasePagingTester, PageAssertionMixin):
                 |1 |j    |j     |j     |
                 """
 
-            expected_data = create_rows(data, cursor, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': unicode, 'value2': unicode})
+            expected_data = create_rows(data, cursor, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': str, 'value2': str})
 
             future = cursor.execute_async(
                 SimpleStatement("select * from paging_test where id = 1 order by value asc", fetch_size=3, consistency_level=CL.ALL)
@@ -282,12 +285,12 @@ class TestPagingWithModifiers(BasePagingTester, PageAssertionMixin):
 
             pf = PageFetcher(future).request_all()
 
-            print "pages:", pf.num_results_all()
-            self.assertEqual(pf.pagecount(), 4)
-            self.assertEqual(pf.num_results_all(), [3, 3, 3, 1])
+            print("pages:", pf.num_results_all())
+            assert pf.pagecount() == 4
+            assert pf.num_results_all() == [3, 3, 3, 1]
 
             # these should be equal (in the same order)
-            self.assertEqual(pf.all_data(), expected_data)
+            assert pf.all_data() == expected_data
 
             # drop the ORDER BY
             future = cursor.execute_async(
@@ -296,21 +299,21 @@ class TestPagingWithModifiers(BasePagingTester, PageAssertionMixin):
 
             pf = PageFetcher(future).request_all()
 
-            self.assertEqual(pf.pagecount(), 4)
-            self.assertEqual(pf.num_results_all(), [3, 3, 3, 1])
+            assert pf.pagecount() == 4
+            assert pf.num_results_all() == [3, 3, 3, 1]
 
             # these should be equal (in the same order)
-            self.assertEqual(pf.all_data(), list(reversed(expected_data)))
+            assert pf.all_data() == list(reversed(expected_data))
 
     def test_with_limit(self):
         cursor = self.prepare()
         cursor.execute("CREATE TABLE paging_test ( id int, value text, PRIMARY KEY (id, value) )")
 
         def random_txt(text):
-            return unicode(uuid.uuid4())
+            return str(uuid.uuid4())
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
 
             data = """
@@ -379,8 +382,8 @@ class TestPagingWithModifiers(BasePagingTester, PageAssertionMixin):
                     self.fail("Invalid configuration, this should never happen, please go into the code")
 
                 pf = PageFetcher(future).request_all()
-                self.assertEqual(pf.num_results_all(), scenario['expect_pgsizes'])
-                self.assertEqual(pf.pagecount(), scenario['expect_pgcount'])
+                assert pf.num_results_all() == scenario['expect_pgsizes']
+                assert pf.pagecount() == scenario['expect_pgcount']
 
                 # make sure all the data retrieved is a subset of input data
                 self.assertIsSubsetOf(pf.all_data(), expected_data)
@@ -392,7 +395,7 @@ class TestPagingWithModifiers(BasePagingTester, PageAssertionMixin):
         cursor.execute("CREATE TABLE paging_test ( id int, value text, PRIMARY KEY (id, value) )")
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
 
             data = """
@@ -407,7 +410,7 @@ class TestPagingWithModifiers(BasePagingTester, PageAssertionMixin):
                 |8 |and more testing|
                 |9 |and more testing|
                 """
-            create_rows(data, cursor, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': unicode})
+            create_rows(data, cursor, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': str})
 
             future = cursor.execute_async(
                 SimpleStatement("select * from paging_test where value = 'and more testing' ALLOW FILTERING", fetch_size=4, consistency_level=CL.ALL)
@@ -415,8 +418,8 @@ class TestPagingWithModifiers(BasePagingTester, PageAssertionMixin):
 
             pf = PageFetcher(future).request_all()
 
-            self.assertEqual(pf.pagecount(), 2)
-            self.assertEqual(pf.num_results_all(), [4, 3])
+            assert pf.pagecount() == 2
+            assert pf.num_results_all() == [4, 3]
 
             # make sure the allow filtering query matches the expected results (ignoring order)
             self.assertEqualIgnoreOrder(
@@ -431,18 +434,18 @@ class TestPagingWithModifiers(BasePagingTester, PageAssertionMixin):
                     |7 |and more testing|
                     |8 |and more testing|
                     |9 |and more testing|
-                    """, format_funcs={'id': int, 'value': unicode}
+                    """, format_funcs={'id': int, 'value': str}
                 )
             )
 
 
+@pytest.mark.upgrade_test
 class TestPagingData(BasePagingTester, PageAssertionMixin):
 
-    def basic_paging_test(self):
+    def test_basic_paging(self):
         """
         A simple paging test that is easy to debug.
         """
-
         cursor = self.prepare()
 
         cursor.execute("""
@@ -464,12 +467,12 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE test")
             cursor.execute("TRUNCATE test2")
 
             for table in ("test", "test2"):
-                debug("Querying table %s" % (table,))
+                logger.debug("Querying table %s" % (table,))
                 expected = []
                 # match the key ordering for murmur3
                 for k in (1, 0, 2):
@@ -479,15 +482,15 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
                         expected.append([k, c, value])
 
                 for fetch_size in (2, 3, 5, 10, 100):
-                    debug("Using fetch size %d" % fetch_size)
+                    logger.debug("Using fetch size %d" % fetch_size)
                     cursor.default_fetch_size = fetch_size
                     results = rows_to_list(cursor.execute("SELECT * FROM %s" % (table,)))
                     import pprint
                     pprint.pprint(results)
-                    self.assertEqual(len(expected), len(results))
-                    self.assertEqual(expected, results)
+                    assert len(expected) == len(results)
+                    assert expected == results
 
-    def basic_compound_paging_test(self):
+    def test_basic_compound_paging(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -511,12 +514,12 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE test")
             cursor.execute("TRUNCATE test2")
 
             for table in ("test", "test2"):
-                debug("Querying table %s" % (table,))
+                logger.debug("Querying table %s" % (table,))
                 expected = []
                 # match the key ordering for murmur3
                 for k in (1, 0, 2):
@@ -526,23 +529,23 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
                         expected.append([k, c, 0, value])
 
                 for fetch_size in (2, 3, 5, 10, 100):
-                    debug("Using fetch size %d" % fetch_size)
+                    logger.debug("Using fetch size %d" % fetch_size)
                     cursor.default_fetch_size = fetch_size
                     results = rows_to_list(cursor.execute("SELECT * FROM %s" % (table,)))
                     import pprint
                     pprint.pprint(results)
-                    self.assertEqual(len(expected), len(results))
-                    self.assertEqual(expected, results)
+                    assert len(expected) == len(results)
+                    assert expected == results
 
     def test_paging_a_single_wide_row(self):
         cursor = self.prepare()
         cursor.execute("CREATE TABLE paging_test ( id int, value text, PRIMARY KEY (id, value) )")
 
         def random_txt(text):
-            return unicode(uuid.uuid4())
+            return str(uuid.uuid4())
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
 
             data = """
@@ -557,11 +560,11 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
 
             pf = PageFetcher(future).request_all()
 
-            self.assertEqual(pf.pagecount(), 4)
-            self.assertEqual(pf.num_results_all(), [3000, 3000, 3000, 1000])
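+            # the wide partition's 10000 rows should page as 3000, 3000, 3000 and 1000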
+            assert pf.pagecount() == 4
+            assert pf.num_results_all() == [3000, 3000, 3000, 1000]
 
             all_results = pf.all_data()
-            self.assertEqual(len(expected_data), len(all_results))
+            assert len(expected_data) == len(all_results)
             self.maxDiff = None
             self.assertEqualIgnoreOrder(expected_data, all_results)
 
@@ -570,10 +573,10 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
         cursor.execute("CREATE TABLE paging_test ( id int, value text, PRIMARY KEY (id, value) )")
 
         def random_txt(text):
-            return unicode(uuid.uuid4())
+            return str(uuid.uuid4())
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
 
             data = """
@@ -589,8 +592,8 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
 
             pf = PageFetcher(future).request_all()
 
-            self.assertEqual(pf.pagecount(), 4)
-            self.assertEqual(pf.num_results_all(), [3000, 3000, 3000, 1000])
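+            # again expect pages of 3000, 3000, 3000 and 1000 rows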
+            assert pf.pagecount() == 4
+            assert pf.num_results_all() == [3000, 3000, 3000, 1000]
 
             self.assertEqualIgnoreOrder(pf.all_data(), expected_data)
 
@@ -600,13 +603,13 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
         cursor.execute("CREATE INDEX ON paging_test(mybool)")
 
         def random_txt(text):
-            return unicode(uuid.uuid4())
+            return str(uuid.uuid4())
 
         def bool_from_str_int(text):
             return bool(int(text))
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
 
             data = """
@@ -628,19 +631,18 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
             pf = PageFetcher(future).request_all()
 
             # the query only searched for True rows, so let's pare down the expectations for comparison
-            expected_data = filter(lambda x: x.get('mybool') is True, all_data)
+            expected_data = [x for x in all_data if x.get('mybool') is True]
 
-            self.assertEqual(pf.pagecount(), 2)
-            self.assertEqual(pf.num_results_all(), [400, 200])
+            assert pf.pagecount() == 2
+            assert pf.num_results_all() == [400, 200]
             self.assertEqualIgnoreOrder(expected_data, pf.all_data())
 
     @since('2.0.6')
-    def static_columns_paging_test(self):
+    def test_static_columns_paging(self):
         """
         Exercises paging with static columns to detect bugs
         @jira_ticket CASSANDRA-8502.
         """
-
         cursor = self.prepare(row_factory=named_tuple_factory)
         cursor.execute("CREATE TABLE test (a int, b int, c int, s1 int static, s2 int static, PRIMARY KEY (a, b))")
 
@@ -648,10 +650,10 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
             min_version = min(self.get_node_versions())
             latest_version_with_bug = '2.2.3'
             if min_version <= latest_version_with_bug:
-                raise SkipTest('known bug released in {latest_ver} and earlier (current min version {min_ver}); '
+                pytest.skip('known bug released in {latest_ver} and earlier (current min version {min_ver}); '
                                'skipping'.format(latest_ver=latest_version_with_bug, min_ver=min_version))
 
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE test")
 
             for i in range(4):
@@ -666,200 +668,200 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
                 "a, b, c")
 
             for page_size in (2, 3, 4, 5, 15, 16, 17, 100):
-                debug("Using page size of %d" % page_size)
+                logger.debug("Using page size of %d" % page_size)
                 cursor.default_fetch_size = page_size
                 for selector in selectors:
-                    debug("Using selector '%s'" % (selector,))
+                    logger.debug("Using selector '%s'" % (selector,))
                     results = list(cursor.execute("SELECT %s FROM test" % selector))
                     import pprint
                     pprint.pprint(results)
-                    # self.assertEqual(16, len(results))
-                    self.assertEqual([0] * 4 + [1] * 4 + [2] * 4 + [3] * 4, sorted([r.a for r in results]))
-                    self.assertEqual([0, 1, 2, 3] * 4, [r.b for r in results])
-                    self.assertEqual([0, 1, 2, 3] * 4, [r.c for r in results])
+                    # assert 16 == len(results)
+                    assert [0] * 4 + [1] * 4 + [2] * 4 + [3] * 4 == sorted([r.a for r in results])
+                    assert [0, 1, 2, 3] * 4 == [r.b for r in results]
+                    assert [0, 1, 2, 3] * 4 == [r.c for r in results]
                     if "s1" in selector:
-                        self.assertEqual([17] * 16, [r.s1 for r in results])
+                        assert [17] * 16 == [r.s1 for r in results]
                     if "s2" in selector:
-                        self.assertEqual([42] * 16, [r.s2 for r in results])
+                        assert [42] * 16 == [r.s2 for r in results]
 
             # IN over the partitions
             for page_size in (2, 3, 4, 5, 15, 16, 17, 100):
-                debug("Using page size of %d" % page_size)
+                logger.debug("Using page size of %d" % page_size)
                 cursor.default_fetch_size = page_size
                 for selector in selectors:
-                    debug("Using selector '%s'" % (selector,))
+                    logger.debug("Using selector '%s'" % (selector,))
                     results = list(cursor.execute("SELECT %s FROM test WHERE a IN (0, 1, 2, 3)" % selector))
-                    self.assertEqual(16, len(results))
-                    self.assertEqual([0] * 4 + [1] * 4 + [2] * 4 + [3] * 4, sorted([r.a for r in results]))
-                    self.assertEqual([0, 1, 2, 3] * 4, [r.b for r in results])
-                    self.assertEqual([0, 1, 2, 3] * 4, [r.c for r in results])
+                    assert 16 == len(results)
+                    assert [0] * 4 + [1] * 4 + [2] * 4 + [3] * 4 == sorted([r.a for r in results])
+                    assert [0, 1, 2, 3] * 4 == [r.b for r in results]
+                    assert [0, 1, 2, 3] * 4 == [r.c for r in results]
                     if "s1" in selector:
-                        self.assertEqual([17] * 16, [r.s1 for r in results])
+                        assert [17] * 16 == [r.s1 for r in results]
                     if "s2" in selector:
-                        self.assertEqual([42] * 16, [r.s2 for r in results])
+                        assert [42] * 16 == [r.s2 for r in results]
 
             # single partition
             for i in range(16):
                 cursor.execute("INSERT INTO test (a, b, c, s1, s2) VALUES (%d, %d, %d, %d, %d)" % (99, i, i, 17, 42))
 
             for page_size in (2, 3, 4, 5, 15, 16, 17, 100):
-                debug("Using page size of %d" % page_size)
+                logger.debug("Using page size of %d" % page_size)
                 cursor.default_fetch_size = page_size
                 for selector in selectors:
-                    debug("Using selector '%s'" % (selector,))
+                    logger.debug("Using selector '%s'" % (selector,))
                     results = list(cursor.execute("SELECT %s FROM test WHERE a = 99" % selector))
-                    self.assertEqual(16, len(results))
-                    self.assertEqual([99] * 16, [r.a for r in results])
-                    self.assertEqual(range(16), [r.b for r in results])
-                    self.assertEqual(range(16), [r.c for r in results])
+                    assert 16 == len(results)
+                    assert [99] * 16 == [r.a for r in results]
+                    assert list(range(16)) == [r.b for r in results]
+                    assert list(range(16)) == [r.c for r in results]
                     if "s1" in selector:
-                        self.assertEqual([17] * 16, [r.s1 for r in results])
+                        assert [17] * 16 == [r.s1 for r in results]
                     if "s2" in selector:
-                        self.assertEqual([42] * 16, [r.s2 for r in results])
+                        assert [42] * 16 == [r.s2 for r in results]
 
             # reversed
             for page_size in (2, 3, 4, 5, 15, 16, 17, 100):
-                debug("Using page size of %d" % page_size)
+                logger.debug("Using page size of %d" % page_size)
                 cursor.default_fetch_size = page_size
                 for selector in selectors:
-                    debug("Using selector '%s'" % (selector,))
+                    logger.debug("Using selector '%s'" % (selector,))
                     results = list(cursor.execute("SELECT %s FROM test WHERE a = 99 ORDER BY b DESC" % selector))
-                    self.assertEqual(16, len(results))
-                    self.assertEqual([99] * 16, [r.a for r in results])
-                    self.assertEqual(list(reversed(range(16))), [r.b for r in results])
-                    self.assertEqual(list(reversed(range(16))), [r.c for r in results])
+                    assert 16 == len(results)
+                    assert [99] * 16 == [r.a for r in results]
+                    assert list(reversed(list(range(16)))) == [r.b for r in results]
+                    assert list(reversed(list(range(16)))) == [r.c for r in results]
                     if "s1" in selector:
-                        self.assertEqual([17] * 16, [r.s1 for r in results])
+                        assert [17] * 16 == [r.s1 for r in results]
                     if "s2" in selector:
-                        self.assertEqual([42] * 16, [r.s2 for r in results])
+                        assert [42] * 16 == [r.s2 for r in results]
 
             # IN on clustering column
             for page_size in (2, 3, 4, 5, 15, 16, 17, 100):
-                debug("Using page size of %d" % page_size)
+                logger.debug("Using page size of %d" % page_size)
                 cursor.default_fetch_size = page_size
                 for selector in selectors:
-                    debug("Using selector '%s'" % (selector,))
+                    logger.debug("Using selector '%s'" % (selector,))
                     results = list(cursor.execute("SELECT %s FROM test WHERE a = 99 AND b IN (3, 4, 8, 14, 15)" % selector))
-                    self.assertEqual(5, len(results))
-                    self.assertEqual([99] * 5, [r.a for r in results])
-                    self.assertEqual([3, 4, 8, 14, 15], [r.b for r in results])
-                    self.assertEqual([3, 4, 8, 14, 15], [r.c for r in results])
+                    assert 5 == len(results)
+                    assert [99] * 5 == [r.a for r in results]
+                    assert [3, 4, 8, 14, 15] == [r.b for r in results]
+                    assert [3, 4, 8, 14, 15] == [r.c for r in results]
                     if "s1" in selector:
-                        self.assertEqual([17] * 5, [r.s1 for r in results])
+                        assert [17] * 5 == [r.s1 for r in results]
                     if "s2" in selector:
-                        self.assertEqual([42] * 5, [r.s2 for r in results])
+                        assert [42] * 5 == [r.s2 for r in results]
 
             # reversed IN on clustering column
             for page_size in (2, 3, 4, 5, 15, 16, 17, 100):
-                debug("Using page size of %d" % page_size)
+                logger.debug("Using page size of %d" % page_size)
                 cursor.default_fetch_size = page_size
                 for selector in selectors:
-                    debug("Using selector '%s'" % (selector,))
+                    logger.debug("Using selector '%s'" % (selector,))
                     results = list(cursor.execute("SELECT %s FROM test WHERE a = 99 AND b IN (3, 4, 8, 14, 15) ORDER BY b DESC" % selector))
-                    self.assertEqual(5, len(results))
-                    self.assertEqual([99] * 5, [r.a for r in results])
-                    self.assertEqual(list(reversed([3, 4, 8, 14, 15])), [r.b for r in results])
-                    self.assertEqual(list(reversed([3, 4, 8, 14, 15])), [r.c for r in results])
+                    assert 5 == len(results)
+                    assert [99] * 5 == [r.a for r in results]
+                    assert list(reversed([3, 4, 8, 14, 15])) == [r.b for r in results]
+                    assert list(reversed([3, 4, 8, 14, 15])) == [r.c for r in results]
                     if "s1" in selector:
-                        self.assertEqual([17] * 5, [r.s1 for r in results])
+                        assert [17] * 5 == [r.s1 for r in results]
                     if "s2" in selector:
-                        self.assertEqual([42] * 5, [r.s2 for r in results])
+                        assert [42] * 5 == [r.s2 for r in results]
 
             # slice on clustering column with set start
             for page_size in (2, 3, 4, 5, 15, 16, 17, 100):
-                debug("Using page size of %d" % page_size)
+                logger.debug("Using page size of %d" % page_size)
                 cursor.default_fetch_size = page_size
                 for selector in selectors:
-                    debug("Using selector '%s'" % (selector,))
+                    logger.debug("Using selector '%s'" % (selector,))
                     results = list(cursor.execute("SELECT %s FROM test WHERE a = 99 AND b > 3" % selector))
-                    self.assertEqual(12, len(results))
-                    self.assertEqual([99] * 12, [r.a for r in results])
-                    self.assertEqual(range(4, 16), [r.b for r in results])
-                    self.assertEqual(range(4, 16), [r.c for r in results])
+                    assert 12 == len(results)
+                    assert [99] * 12 == [r.a for r in results]
+                    assert list(range(4, 16)) == [r.b for r in results]
+                    assert list(range(4, 16)) == [r.c for r in results]
                     if "s1" in selector:
-                        self.assertEqual([17] * 12, [r.s1 for r in results])
+                        assert [17] * 12 == [r.s1 for r in results]
                     if "s2" in selector:
-                        self.assertEqual([42] * 12, [r.s2 for r in results])
+                        assert [42] * 12 == [r.s2 for r in results]
 
             # reversed slice on clustering column with set finish
             for page_size in (2, 3, 4, 5, 15, 16, 17, 100):
-                debug("Using page size of %d" % page_size)
+                logger.debug("Using page size of %d" % page_size)
                 cursor.default_fetch_size = page_size
                 for selector in selectors:
-                    debug("Using selector '%s'" % (selector,))
+                    logger.debug("Using selector '%s'" % (selector,))
                     results = list(cursor.execute("SELECT %s FROM test WHERE a = 99 AND b > 3 ORDER BY b DESC" % selector))
-                    self.assertEqual(12, len(results))
-                    self.assertEqual([99] * 12, [r.a for r in results])
-                    self.assertEqual(list(reversed(range(4, 16))), [r.b for r in results])
-                    self.assertEqual(list(reversed(range(4, 16))), [r.c for r in results])
+                    assert 12 == len(results)
+                    assert [99] * 12 == [r.a for r in results]
+                    assert list(reversed(list(range(4, 16)))) == [r.b for r in results]
+                    assert list(reversed(list(range(4, 16)))) == [r.c for r in results]
                     if "s1" in selector:
-                        self.assertEqual([17] * 12, [r.s1 for r in results])
+                        assert [17] * 12 == [r.s1 for r in results]
                     if "s2" in selector:
-                        self.assertEqual([42] * 12, [r.s2 for r in results])
+                        assert [42] * 12 == [r.s2 for r in results]
 
             # slice on clustering column with set finish
             for page_size in (2, 3, 4, 5, 15, 16, 17, 100):
-                debug("Using page size of %d" % page_size)
+                logger.debug("Using page size of %d" % page_size)
                 cursor.default_fetch_size = page_size
                 for selector in selectors:
-                    debug("Using selector '%s'" % (selector,))
+                    logger.debug("Using selector '%s'" % (selector,))
                     results = list(cursor.execute("SELECT %s FROM test WHERE a = 99 AND b < 14" % selector))
-                    self.assertEqual(14, len(results))
-                    self.assertEqual([99] * 14, [r.a for r in results])
-                    self.assertEqual(range(14), [r.b for r in results])
-                    self.assertEqual(range(14), [r.c for r in results])
+                    assert 14 == len(results)
+                    assert [99] * 14 == [r.a for r in results]
+                    assert list(range(14)) == [r.b for r in results]
+                    assert list(range(14)) == [r.c for r in results]
                     if "s1" in selector:
-                        self.assertEqual([17] * 14, [r.s1 for r in results])
+                        assert [17] * 14 == [r.s1 for r in results]
                     if "s2" in selector:
-                        self.assertEqual([42] * 14, [r.s2 for r in results])
+                        assert [42] * 14 == [r.s2 for r in results]
 
             # reversed slice on clustering column with set start
             for page_size in (2, 3, 4, 5, 15, 16, 17, 100):
-                debug("Using page size of %d" % page_size)
+                logger.debug("Using page size of %d" % page_size)
                 cursor.default_fetch_size = page_size
                 for selector in selectors:
-                    debug("Using selector '%s'" % (selector,))
+                    logger.debug("Using selector '%s'" % (selector,))
                     results = list(cursor.execute("SELECT %s FROM test WHERE a = 99 AND b < 14 ORDER BY b DESC" % selector))
-                    self.assertEqual(14, len(results))
-                    self.assertEqual([99] * 14, [r.a for r in results])
-                    self.assertEqual(list(reversed(range(14))), [r.b for r in results])
-                    self.assertEqual(list(reversed(range(14))), [r.c for r in results])
+                    assert 14 == len(results)
+                    assert [99] * 14 == [r.a for r in results]
+                    assert list(reversed(list(range(14)))) == [r.b for r in results]
+                    assert list(reversed(list(range(14)))) == [r.c for r in results]
                     if "s1" in selector:
-                        self.assertEqual([17] * 14, [r.s1 for r in results])
+                        assert [17] * 14 == [r.s1 for r in results]
                     if "s2" in selector:
-                        self.assertEqual([42] * 14, [r.s2 for r in results])
+                        assert [42] * 14 == [r.s2 for r in results]
 
             # slice on clustering column with start and finish
             for page_size in (2, 3, 4, 5, 15, 16, 17, 100):
-                debug("Using page size of %d" % page_size)
+                logger.debug("Using page size of %d" % page_size)
                 cursor.default_fetch_size = page_size
                 for selector in selectors:
-                    debug("Using selector '%s'" % (selector,))
+                    logger.debug("Using selector '%s'" % (selector,))
                     results = list(cursor.execute("SELECT %s FROM test WHERE a = 99 AND b > 3 AND b < 14" % selector))
-                    self.assertEqual(10, len(results))
-                    self.assertEqual([99] * 10, [r.a for r in results])
-                    self.assertEqual(range(4, 14), [r.b for r in results])
-                    self.assertEqual(range(4, 14), [r.c for r in results])
+                    assert 10 == len(results)
+                    assert [99] * 10 == [r.a for r in results]
+                    assert list(range(4, 14)) == [r.b for r in results]
+                    assert list(range(4, 14)) == [r.c for r in results]
                     if "s1" in selector:
-                        self.assertEqual([17] * 10, [r.s1 for r in results])
+                        assert [17] * 10 == [r.s1 for r in results]
                     if "s2" in selector:
-                        self.assertEqual([42] * 10, [r.s2 for r in results])
+                        assert [42] * 10 == [r.s2 for r in results]
 
             # reversed slice on clustering column with start and finish
             for page_size in (2, 3, 4, 5, 15, 16, 17, 100):
-                debug("Using page size of %d" % page_size)
+                logger.debug("Using page size of %d" % page_size)
                 cursor.default_fetch_size = page_size
                 for selector in selectors:
-                    debug("Using selector '%s'" % (selector,))
+                    logger.debug("Using selector '%s'" % (selector,))
                     results = list(cursor.execute("SELECT %s FROM test WHERE a = 99 AND b > 3 AND b < 14 ORDER BY b DESC" % selector))
-                    self.assertEqual(10, len(results))
-                    self.assertEqual([99] * 10, [r.a for r in results])
-                    self.assertEqual(list(reversed(range(4, 14))), [r.b for r in results])
-                    self.assertEqual(list(reversed(range(4, 14))), [r.c for r in results])
+                    assert 10 == len(results)
+                    assert [99] * 10 == [r.a for r in results]
+                    assert list(reversed(list(range(4, 14)))) == [r.b for r in results]
+                    assert list(reversed(list(range(4, 14)))) == [r.c for r in results]
                     if "s1" in selector:
-                        self.assertEqual([17] * 10, [r.s1 for r in results])
+                        assert [17] * 10 == [r.s1 for r in results]
                     if "s2" in selector:
-                        self.assertEqual([42] * 10, [r.s2 for r in results])
+                        assert [42] * 10 == [r.s2 for r in results]
 
     @since('2.0')
     def test_paging_using_secondary_indexes_with_static_cols(self):
@@ -868,13 +870,13 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
         cursor.execute("CREATE INDEX ON paging_test(mybool)")
 
         def random_txt(text):
-            return unicode(uuid.uuid4())
+            return str(uuid.uuid4())
 
         def bool_from_str_int(text):
             return bool(int(text))
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
 
             data = """
@@ -896,10 +898,10 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
             pf = PageFetcher(future).request_all()
 
             # the query only searched for True rows, so let's pare down the expectations for comparison
-            expected_data = filter(lambda x: x.get('mybool') is True, all_data)
+            expected_data = [x for x in all_data if x.get('mybool') is True]
 
-            self.assertEqual(pf.pagecount(), 2)
-            self.assertEqual(pf.num_results_all(), [400, 200])
+            assert pf.pagecount() == 2
+            assert pf.num_results_all() == [400, 200]
             self.assertEqualIgnoreOrder(expected_data, pf.all_data())
 
 
@@ -913,10 +915,10 @@ class TestPagingDatasetChanges(BasePagingTester, PageAssertionMixin):
         cursor.execute("CREATE TABLE paging_test ( id int, mytext text, PRIMARY KEY (id, mytext) )")
 
         def random_txt(text):
-            return unicode(uuid.uuid4())
+            return str(uuid.uuid4())
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
 
             data = """
@@ -939,8 +941,8 @@ class TestPagingDatasetChanges(BasePagingTester, PageAssertionMixin):
             cursor.execute(SimpleStatement("insert into paging_test (id, mytext) values (1, 'foo')", consistency_level=CL.ALL))
 
             pf.request_all()
-            self.assertEqual(pf.pagecount(), 2)
-            self.assertEqual(pf.num_results_all(), [501, 499])
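+            # expect pages of 501 and 499 rows after the mid-paging insert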
+            assert pf.pagecount() == 2
+            assert pf.num_results_all() == [501, 499]
 
             self.assertEqualIgnoreOrder(pf.all_data(), expected_data)
 
@@ -949,10 +951,10 @@ class TestPagingDatasetChanges(BasePagingTester, PageAssertionMixin):
         cursor.execute("CREATE TABLE paging_test ( id int, mytext text, PRIMARY KEY (id, mytext) )")
 
         def random_txt(text):
-            return unicode(uuid.uuid4())
+            return str(uuid.uuid4())
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
 
             data = """
@@ -974,11 +976,11 @@ class TestPagingDatasetChanges(BasePagingTester, PageAssertionMixin):
             cursor.execute(SimpleStatement("insert into paging_test (id, mytext) values (2, 'foo')", consistency_level=CL.ALL))
 
             pf.request_all()
-            self.assertEqual(pf.pagecount(), 2)
-            self.assertEqual(pf.num_results_all(), [500, 500])
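+            # expect two pages of 500 rows each, with the inserted row included in the results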
+            assert pf.pagecount() == 2
+            assert pf.num_results_all() == [500, 500]
 
             # add the new row to the expected data and then do a compare
-            expected_data.append({u'id': 2, u'mytext': u'foo'})
+            expected_data.append({'id': 2, 'mytext': 'foo'})
             self.assertEqualIgnoreOrder(pf.all_data(), expected_data)
 
     def test_row_TTL_expiry_during_paging(self):
@@ -986,10 +988,10 @@ class TestPagingDatasetChanges(BasePagingTester, PageAssertionMixin):
         cursor.execute("CREATE TABLE paging_test ( id int, mytext text, PRIMARY KEY (id, mytext) )")
 
         def random_txt(text):
-            return unicode(uuid.uuid4())
+            return str(uuid.uuid4())
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
 
             # create rows with TTL (some of which we'll try to get after expiry)
@@ -1023,8 +1025,8 @@ class TestPagingDatasetChanges(BasePagingTester, PageAssertionMixin):
             time.sleep(15)
 
             pf.request_all()
-            self.assertEqual(pf.pagecount(), 3)
-            self.assertEqual(pf.num_results_all(), [300, 300, 200])
+            assert pf.pagecount() == 3
+            assert pf.num_results_all() == [300, 300, 200]
 
     def test_cell_TTL_expiry_during_paging(self):
         cursor = self.prepare()
@@ -1038,10 +1040,10 @@ class TestPagingDatasetChanges(BasePagingTester, PageAssertionMixin):
             """)
 
         def random_txt(text):
-            return unicode(uuid.uuid4())
+            return str(uuid.uuid4())
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
 
             data = create_rows(
@@ -1084,7 +1086,7 @@ class TestPagingDatasetChanges(BasePagingTester, PageAssertionMixin):
             for row in data[1000:1500]:
                 _id, mytext = row['id'], row['mytext']
                 page3expected.append(
-                    {u'id': _id, u'mytext': mytext, u'somevalue': None, u'anothervalue': None}
+                    {'id': _id, 'mytext': mytext, 'somevalue': None, 'anothervalue': None}
                 )
 
             time.sleep(15)
@@ -1107,10 +1109,10 @@ class TestPagingQueryIsolation(BasePagingTester, PageAssertionMixin):
         cursor.execute("CREATE TABLE paging_test ( id int, mytext text, PRIMARY KEY (id, mytext) )")
 
         def random_txt(text):
-            return unicode(uuid.uuid4())
+            return str(uuid.uuid4())
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
 
             data = """
@@ -1158,17 +1160,17 @@ class TestPagingQueryIsolation(BasePagingTester, PageAssertionMixin):
             for pf in page_fetchers:
                 pf.request_all(timeout=10)
 
-            self.assertEqual(page_fetchers[0].pagecount(), 10)
-            self.assertEqual(page_fetchers[1].pagecount(), 9)
-            self.assertEqual(page_fetchers[2].pagecount(), 8)
-            self.assertEqual(page_fetchers[3].pagecount(), 7)
-            self.assertEqual(page_fetchers[4].pagecount(), 6)
-            self.assertEqual(page_fetchers[5].pagecount(), 5)
-            self.assertEqual(page_fetchers[6].pagecount(), 5)
-            self.assertEqual(page_fetchers[7].pagecount(), 5)
-            self.assertEqual(page_fetchers[8].pagecount(), 4)
-            self.assertEqual(page_fetchers[9].pagecount(), 4)
-            self.assertEqual(page_fetchers[10].pagecount(), 34)
+            assert page_fetchers[0].pagecount() == 10
+            assert page_fetchers[1].pagecount() == 9
+            assert page_fetchers[2].pagecount() == 8
+            assert page_fetchers[3].pagecount() == 7
+            assert page_fetchers[4].pagecount() == 6
+            assert page_fetchers[5].pagecount() == 5
+            assert page_fetchers[6].pagecount() == 5
+            assert page_fetchers[7].pagecount() == 5
+            assert page_fetchers[8].pagecount() == 4
+            assert page_fetchers[9].pagecount() == 4
+            assert page_fetchers[10].pagecount() == 34
 
             self.assertEqualIgnoreOrder(flatten_into_set(page_fetchers[0].all_data()), flatten_into_set(expected_data[:5000]))
             self.assertEqualIgnoreOrder(flatten_into_set(page_fetchers[1].all_data()), flatten_into_set(expected_data[5000:10000]))
@@ -1196,7 +1198,7 @@ class TestPagingWithDeletions(BasePagingTester, PageAssertionMixin):
     def setup_data(self, cursor):
 
         def random_txt(text):
-            return unicode(uuid.uuid4())
+            return str(uuid.uuid4())
 
         data = """
              | id | mytext   | col1 | col2 | col3 |
@@ -1237,12 +1239,12 @@ class TestPagingWithDeletions(BasePagingTester, PageAssertionMixin):
 
         pf = self.get_page_fetcher(cursor)
         pf.request_all(timeout=timeout)
-        self.assertEqual(pf.pagecount(), pagecount)
-        self.assertEqual(pf.num_results_all(), num_page_results)
+        assert pf.pagecount() == pagecount
+        assert pf.num_results_all() == num_page_results
 
         for i in range(pf.pagecount()):
             page_data = pf.page_data(i + 1)
-            self.assertEquals(page_data, expected_pages_data[i])
+            assert page_data == expected_pages_data[i]
 
     def test_single_partition_deletions(self):
         """Test single partition deletions """
@@ -1250,7 +1252,7 @@ class TestPagingWithDeletions(BasePagingTester, PageAssertionMixin):
         self.setup_schema(cursor)
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
 
             expected_data = self.setup_data(cursor)
@@ -1296,7 +1298,7 @@ class TestPagingWithDeletions(BasePagingTester, PageAssertionMixin):
         self.setup_schema(cursor)
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
             expected_data = self.setup_data(cursor)
 
@@ -1315,7 +1317,7 @@ class TestPagingWithDeletions(BasePagingTester, PageAssertionMixin):
         self.setup_schema(cursor)
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
             expected_data = self.setup_data(cursor)
 
@@ -1365,7 +1367,7 @@ class TestPagingWithDeletions(BasePagingTester, PageAssertionMixin):
         self.setup_schema(cursor)
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
             expected_data = self.setup_data(cursor)
 
@@ -1417,7 +1419,7 @@ class TestPagingWithDeletions(BasePagingTester, PageAssertionMixin):
         self.setup_schema(cursor)
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
             expected_data = self.setup_data(cursor)
 
@@ -1457,7 +1459,7 @@ class TestPagingWithDeletions(BasePagingTester, PageAssertionMixin):
         self.setup_schema(cursor)
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
             data = self.setup_data(cursor)
 
@@ -1473,7 +1475,7 @@ class TestPagingWithDeletions(BasePagingTester, PageAssertionMixin):
 
     def test_failure_threshold_deletions(self):
         """Test that paging throws a failure in case of tombstone threshold """
-        self.allow_log_errors = True
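+        # this test deliberately trips the tombstone failure threshold, so tolerate errors in the node logs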
+        self.fixture_dtest_setup.allow_log_errors = True
         self.cluster.set_configuration_options(
             values={'tombstone_failure_threshold': 500,
                     'read_request_timeout_in_ms': 1000,
@@ -1485,12 +1487,12 @@ class TestPagingWithDeletions(BasePagingTester, PageAssertionMixin):
         self.setup_schema(cursor)
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
-            debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
+            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
             cursor.execute("TRUNCATE paging_test")
             self.setup_data(cursor)
 
             # Add more data
-            values = map(lambda i: uuid.uuid4(), range(3000))
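+            # each null col1 insert writes a tombstone, enough to exceed the 500-tombstone threshold set above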
+            values = [uuid.uuid4() for _ in range(3000)]
             for value in values:
                 cursor.execute(SimpleStatement(
                     "insert into paging_test (id, mytext, col1) values (1, '{}', null) ".format(
@@ -1525,7 +1527,9 @@ for klaus in BasePagingTester.__subclasses__():
                                                             rf=spec['RF'],
                                                             pathname=spec['UPGRADE_PATH'].name)
         gen_class_name = klaus.__name__ + suffix
-        assert_not_in(gen_class_name, globals())
+        assert gen_class_name not in globals()
 
         upgrade_applies_to_env = RUN_STATIC_UPGRADE_MATRIX or spec['UPGRADE_PATH'].upgrade_meta.matches_current_env_version_family
-        globals()[gen_class_name] = skipUnless(upgrade_applies_to_env, 'test not applicable to env.')(type(gen_class_name, (klaus,), spec))
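+        # a bare pytest.mark.skip(...) call is a no-op; the marker has to be applied to the generated class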
+        cls = type(gen_class_name, (klaus,), spec)
+        if not upgrade_applies_to_env:
+            cls = pytest.mark.skip(reason='test not applicable to env.')(cls)
+        globals()[gen_class_name] = cls

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/upgrade_tests/regression_test.py
----------------------------------------------------------------------
diff --git a/upgrade_tests/regression_test.py b/upgrade_tests/regression_test.py
index 613d195..d3cd914 100644
--- a/upgrade_tests/regression_test.py
+++ b/upgrade_tests/regression_test.py
@@ -1,23 +1,25 @@
 """
 Home for upgrade-related tests that don't fit in with the core upgrade testing in dtest.upgrade_through_versions
 """
-from unittest import skipUnless
+import glob
+import os
+import re
+import time
+import pytest
+import logging
 
 from cassandra import ConsistencyLevel as CL
-from nose.tools import assert_not_in
 
-from dtest import RUN_STATIC_UPGRADE_MATRIX, debug
-from tools.decorators import since
+from dtest import RUN_STATIC_UPGRADE_MATRIX
 from tools.jmxutils import (JolokiaAgent, make_mbean)
-from upgrade_base import UpgradeTester
-from upgrade_manifest import build_upgrade_pairs
+from .upgrade_base import UpgradeTester
+from .upgrade_manifest import build_upgrade_pairs
 
-import glob
-import os
-import re
-import time
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
+@pytest.mark.upgrade_test
 class TestForRegressions(UpgradeTester):
     """
     Catch-all class for regression tests on specific versions.
@@ -65,7 +67,7 @@ class TestForRegressions(UpgradeTester):
 
             for symbol, year in symbol_years:
                 count = s[1].execute("select count(*) from financial.symbol_history where symbol='{}' and year={};".format(symbol, year))[0][0]
-                self.assertEqual(count, expected_rows, "actual {} did not match expected {}".format(count, expected_rows))
+                assert count == expected_rows, "actual {} did not match expected {}".format(count, expected_rows)
 
     def test13294(self):
         """
@@ -80,7 +82,7 @@ class TestForRegressions(UpgradeTester):
         session = self.prepare(jolokia=True)
         session.execute("CREATE KEYSPACE test13294 WITH replication={'class':'SimpleStrategy', 'replication_factor': 2};")
         session.execute("CREATE TABLE test13294.t (id int PRIMARY KEY, d int) WITH compaction = {'class': 'SizeTieredCompactionStrategy','enabled':'false'}")
-        for x in xrange(0, 5):
+        for x in range(0, 5):
             session.execute("INSERT INTO test13294.t (id, d) VALUES (%d, %d)" % (x, x))
             cluster.flush()
 
@@ -113,9 +115,9 @@ class TestForRegressions(UpgradeTester):
                 sstables_after = self.get_all_sstables(node1)
                 # since autocompaction is disabled and we compact a single sstable above
                 # the number of sstables after should be the same as before.
-                self.assertEquals(len(sstables_before), len(sstables_after))
+                assert len(sstables_before) == len(sstables_after)
                 checked = True
-        self.assertTrue(checked)
+        assert checked
 
     @since('3.0.14', max_version='3.0.99')
     def test_schema_agreement(self):
@@ -137,10 +139,10 @@ class TestForRegressions(UpgradeTester):
         session.cluster.control_connection.wait_for_schema_agreement(wait_time=30)
 
         def validate_schema_agreement(n, is_upgr):
-            debug("querying node {} for schema information, upgraded: {}".format(n.name, is_upgr))
+            logger.debug("querying node {} for schema information, upgraded: {}".format(n.name, is_upgr))
 
             response = n.nodetool('describecluster').stdout
-            debug(response)
+            logger.debug(response)
             schemas = response.split('Schema versions:')[1].strip()
             num_schemas = len(re.findall('\[.*?\]', schemas))
             self.assertEqual(num_schemas, 1, "There were multiple schema versions during an upgrade: {}"
@@ -163,7 +165,7 @@ class TestForRegressions(UpgradeTester):
 
     def get_all_sstables(self, node):
         # note that node.get_sstables(...) only returns current version sstables
-        keyspace_dirs = [os.path.join(node.get_path(), "data{0}".format(x), "test13294") for x in xrange(0, node.cluster.data_dir_count)]
+        keyspace_dirs = [os.path.join(node.get_path(), "data{0}".format(x), "test13294") for x in range(0, node.cluster.data_dir_count)]
         files = []
         for d in keyspace_dirs:
             for f in glob.glob(d + "/*/*Data*"):
@@ -173,9 +175,11 @@ class TestForRegressions(UpgradeTester):
 
 for path in build_upgrade_pairs():
     gen_class_name = TestForRegressions.__name__ + path.name
-    assert_not_in(gen_class_name, globals())
+    assert gen_class_name not in globals()
     spec = {'UPGRADE_PATH': path,
             '__test__': True}
 
     upgrade_applies_to_env = RUN_STATIC_UPGRADE_MATRIX or path.upgrade_meta.matches_current_env_version_family
-    globals()[gen_class_name] = skipUnless(upgrade_applies_to_env, 'test not applicable to env.')(type(gen_class_name, (TestForRegressions,), spec))
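+    # calling pytest.mark.skip(...) on its own marks nothing; apply the marker to the generated class instead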
+    cls = type(gen_class_name, (TestForRegressions,), spec)
+    if not upgrade_applies_to_env:
+        cls = pytest.mark.skip(reason='test not applicable to env.')(cls)
+    globals()[gen_class_name] = cls

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/upgrade_tests/repair_test.py
----------------------------------------------------------------------
diff --git a/upgrade_tests/repair_test.py b/upgrade_tests/repair_test.py
index aa6041a..9ac45a9 100644
--- a/upgrade_tests/repair_test.py
+++ b/upgrade_tests/repair_test.py
@@ -1,8 +1,11 @@
 import time
+import pytest
+import logging
 
-from dtest import debug
 from repair_tests.repair_test import BaseRepairTest
-from tools.decorators import since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 LEGACY_SSTABLES_JVM_ARGS = ["-Dcassandra.streamdes.initial_mem_buffer_size=1",
                             "-Dcassandra.streamdes.max_mem_buffer_size=5",
@@ -11,18 +14,18 @@ LEGACY_SSTABLES_JVM_ARGS = ["-Dcassandra.streamdes.initial_mem_buffer_size=1",
 
 # We don't support directly upgrading from 2.2 to 4.0 so disabling this on 4.0.
 # TODO: we should probably not hardcode versions?
+@pytest.mark.upgrade_test
 @since('3.0', max_version='4')
 class TestUpgradeRepair(BaseRepairTest):
-    __test__ = True
 
     @since('3.0')
-    def repair_after_upgrade_test(self):
+    def test_repair_after_upgrade(self):
         """
         @jira_ticket CASSANDRA-10990
         """
         default_install_dir = self.cluster.get_install_dir()
         cluster = self.cluster
-        debug("Setting version to 2.2.5")
+        logger.debug("Setting version to 2.2.5")
         cluster.set_install_dir(version="2.2.5")
         self._populate_cluster()
 
@@ -33,7 +36,7 @@ class TestUpgradeRepair(BaseRepairTest):
         cluster = self.cluster
 
         for node in cluster.nodelist():
-            debug("Upgrading %s to current version" % node.name)
+            logger.debug("Upgrading %s to current version" % node.name)
             if node.is_running():
                 node.flush()
                 time.sleep(1)
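
Worth noting: "since = pytest.mark.since" turns the old decorator into a plain
marker, so the version gating itself has to happen at collection time. A hedged
sketch of how a conftest.py hook could honor such a marker (the project's real
conftest may differ, and the CASSANDRA_VERSION environment variable here is an
assumption):

    import os

    import pytest

    def pytest_collection_modifyitems(config, items):
        # hypothetical: the target version comes from the environment
        current = os.environ.get('CASSANDRA_VERSION')
        if current is None:
            return
        for item in items:
            marker = item.get_closest_marker('since')
            if marker is None:
                continue
            min_version = marker.args[0]
            max_version = marker.kwargs.get('max_version')
            # naive string comparison; a real hook would parse the versions
            if current < min_version or (max_version and current > max_version):
                item.add_marker(pytest.mark.skip(reason='version out of range'))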

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/upgrade_tests/storage_engine_upgrade_test.py
----------------------------------------------------------------------
diff --git a/upgrade_tests/storage_engine_upgrade_test.py b/upgrade_tests/storage_engine_upgrade_test.py
index aa1cc27..acadbd3 100644
--- a/upgrade_tests/storage_engine_upgrade_test.py
+++ b/upgrade_tests/storage_engine_upgrade_test.py
@@ -1,22 +1,27 @@
 import os
 import time
+import pytest
+import logging
 
-from dtest import CASSANDRA_VERSION_FROM_BUILD, Tester, debug
-from sstable_generation_loading_test import BaseSStableLoaderTest
-from thrift_bindings.v22.Cassandra import (ConsistencyLevel, Deletion,
+from dtest import CASSANDRA_VERSION_FROM_BUILD, Tester
+from sstable_generation_loading_test import TestBaseSStableLoader
+from thrift_bindings.thrift010.Cassandra import (ConsistencyLevel, Deletion,
                                            Mutation, SlicePredicate,
                                            SliceRange)
-from thrift_tests import composite, get_thrift_client, i32
+from thrift_test import composite, get_thrift_client, i32
 from tools.assertions import (assert_all, assert_length_equal, assert_none,
                               assert_one)
-from tools.decorators import since
 from tools.misc import new_node
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
 LEGACY_SSTABLES_JVM_ARGS = ["-Dcassandra.streamdes.initial_mem_buffer_size=1",
                             "-Dcassandra.streamdes.max_mem_buffer_size=5",
                             "-Dcassandra.streamdes.max_spill_file_size=128"]
 
 
+@pytest.mark.upgrade_test
 @since('3.0')
 class TestStorageEngineUpgrade(Tester):
 
@@ -67,15 +72,15 @@ class TestStorageEngineUpgrade(Tester):
             node2.start(wait_for_binary_proto=True, jvm_args=self.jvm_args)
 
             temp_files = self.glob_data_dirs(os.path.join('*', "tmp", "*.dat"))
-            debug("temp files: " + str(temp_files))
-            self.assertEquals(0, len(temp_files), "Temporary files were not cleaned up.")
+            logger.debug("temp files: " + str(temp_files))
+            assert 0 == len(temp_files), "Temporary files were not cleaned up."
 
         cursor = self.patient_cql_connection(node1)
         if login_keyspace:
             cursor.execute('USE ks')
         return cursor
 
-    def update_and_drop_column_test(self):
+    def test_update_and_drop_column(self):
         """
         Checks that dropped columns are properly handled in legacy sstables
 
@@ -95,27 +100,27 @@ class TestStorageEngineUpgrade(Tester):
 
         assert_one(cursor, "SELECT * FROM t", ['some_key', 0])
 
-    def upgrade_with_clustered_CQL_table_test(self):
+    def test_upgrade_with_clustered_CQL_table(self):
         """
         Validates we can do basic slice queries (forward and reverse ones) on legacy sstables for a CQL table
         with a clustering column.
         """
         self.upgrade_with_clustered_table()
 
-    def upgrade_with_clustered_compact_table_test(self):
+    def test_upgrade_with_clustered_compact_table(self):
         """
         Validates we can do basic slice queries (forward and reverse ones) on legacy sstables for a COMPACT table
         with a clustering column.
         """
         self.upgrade_with_clustered_table(compact_storage=True)
 
-    def upgrade_with_unclustered_CQL_table_test(self):
+    def test_upgrade_with_unclustered_CQL_table(self):
         """
         Validates we can do basic name queries on legacy sstables for a CQL table without clustering.
         """
         self.upgrade_with_unclustered_table()
 
-    def upgrade_with_unclustered_compact_table_test(self):
+    def test_upgrade_with_unclustered_compact_table(self):
         """
         Validates we can do basic name queries on legacy sstables for a COMPACT table without clustering.
         """
@@ -214,10 +219,10 @@ class TestStorageEngineUpgrade(Tester):
         for n in range(PARTITIONS):
             assert_one(session, "SELECT * FROM t WHERE k = {}".format(n), [n, n + 1, n + 2, n + 3, n + 4])
 
-    def upgrade_with_statics_test(self):
+    def test_upgrade_with_statics(self):
         self.upgrade_with_statics(rows=10)
 
-    def upgrade_with_wide_partition_and_statics_test(self):
+    def test_upgrade_with_wide_partition_and_statics(self):
         """ Checks we read old indexed sstables with statics by creating partitions larger than a single index block"""
         self.upgrade_with_statics(rows=1000)
 
@@ -255,13 +260,13 @@ class TestStorageEngineUpgrade(Tester):
                        "SELECT * FROM t WHERE k = {} ORDER BY t DESC".format(n),
                        [[n, v, ROWS - 1, ROWS, v, v + 1] for v in range(ROWS - 1, -1, -1)])
 
-    def upgrade_with_wide_partition_test(self):
+    def test_upgrade_with_wide_partition(self):
         """
         Checks we can read old indexed sstable by creating large partitions (larger than the index block used by sstables).
         """
         self.upgrade_with_wide_partition()
 
-    def upgrade_with_wide_partition_reversed_test(self):
+    def test_upgrade_with_wide_partition_reversed(self):
         """
         Checks we can read old indexed sstable by creating large partitions (larger than the index block used by sstables). This test
         validates reverse queries.
@@ -315,7 +320,7 @@ class TestStorageEngineUpgrade(Tester):
             else:
                 assert_none(session, query)
 
-    def upgrade_with_index_test(self):
+    def test_upgrade_with_index(self):
         """
         Checks a simple index can still be read after upgrade.
         """
@@ -353,7 +358,7 @@ class TestStorageEngineUpgrade(Tester):
                    [[p, r, 0, r * 2] for p in range(PARTITIONS) for r in range(ROWS) if r % 2 == 0],
                    ignore_order=True)
 
-    def upgrade_with_range_tombstones_test(self):
+    def test_upgrade_with_range_tombstones(self):
         """
         Checks sstable including range tombstone can be read after upgrade.
 
@@ -379,7 +384,7 @@ class TestStorageEngineUpgrade(Tester):
 
         self.cluster.compact()
 
-    def upgrade_with_range_and_collection_tombstones_test(self):
+    def test_upgrade_with_range_and_collection_tombstones(self):
         """
         Check sstable including collection tombstone (inserted through adding a collection) can be read after upgrade.
 
@@ -398,7 +403,7 @@ class TestStorageEngineUpgrade(Tester):
         assert_one(session, "SELECT k FROM t", ['some_key'])
 
     @since('3.0', max_version='4')
-    def upgrade_with_range_tombstone_eoc_0_test(self):
+    def test_upgrade_with_range_tombstone_eoc_0(self):
         """
         Check sstable upgrading when the sstable contains a range tombstone with EOC=0.
 
@@ -433,7 +438,7 @@ class TestStorageEngineUpgrade(Tester):
         assert_length_equal(ret, 2)
 
     @since('3.0')
-    def upgrade_with_range_tombstone_ae_test(self):
+    def test_upgrade_with_range_tombstone_ae(self):
         """
         A certain range tombstone pattern causes an AssertionError during upgrade.
         This test makes sure it won't happen.
@@ -448,6 +453,7 @@ class TestStorageEngineUpgrade(Tester):
         assert_none(session, "SELECT k FROM test")
 
 
+@pytest.mark.upgrade_test
 @since('3.0')
 class TestBootstrapAfterUpgrade(TestStorageEngineUpgrade):
 
@@ -455,31 +461,31 @@ class TestBootstrapAfterUpgrade(TestStorageEngineUpgrade):
         super(TestBootstrapAfterUpgrade, self).setUp(bootstrap=True, jvm_args=LEGACY_SSTABLES_JVM_ARGS)
 
 
+@pytest.mark.upgrade_test
 @since('3.0', max_version='4')
-class TestLoadKaSStables(BaseSStableLoaderTest):
-    __test__ = True
+class TestLoadKaSStables(TestBaseSStableLoader):
     upgrade_from = '2.1.6'
     jvm_args = LEGACY_SSTABLES_JVM_ARGS
 
 
+@pytest.mark.upgrade_test
 @since('3.0', max_version='4')
-class TestLoadKaCompactSStables(BaseSStableLoaderTest):
-    __test__ = True
+class TestLoadKaCompactSStables(TestBaseSStableLoader):
     upgrade_from = '2.1.6'
     jvm_args = LEGACY_SSTABLES_JVM_ARGS
     compact = True
 
 
+@pytest.mark.upgrade_test
 @since('3.0', max_version='4')
-class TestLoadLaSStables(BaseSStableLoaderTest):
-    __test__ = True
+class TestLoadLaSStables(TestBaseSStableLoader):
     upgrade_from = '2.2.4'
     jvm_args = LEGACY_SSTABLES_JVM_ARGS
 
 
+@pytest.mark.upgrade_test
 @since('3.0', max_version='4')
-class TestLoadLaCompactSStables(BaseSStableLoaderTest):
-    __test__ = True
+class TestLoadLaCompactSStables(TestBaseSStableLoader):
     upgrade_from = '2.2.4'
     jvm_args = LEGACY_SSTABLES_JVM_ARGS
     compact = True
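
The TestLoad* classes above show the post-migration idiom for test variants:
each subclass is pure configuration (upgrade_from, jvm_args, compact) layered on
a shared loader base. A standalone sketch of that shape, with an illustrative
base class rather than the real TestBaseSStableLoader:

    import pytest

    class LoaderPattern:
        upgrade_from = None      # subclasses pin the sstable source version
        jvm_args = []
        compact = False

        def test_load(self):
            # a real implementation would start a cluster at upgrade_from,
            # stream the legacy sstables, then upgrade and re-read them
            if self.upgrade_from is None:
                pytest.skip('pattern base class, no version configured')
            assert isinstance(self.upgrade_from, str)

    class TestLoadKaPattern(LoaderPattern):
        upgrade_from = '2.1.6'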


http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/thrift_tests.py
----------------------------------------------------------------------
diff --git a/thrift_tests.py b/thrift_tests.py
deleted file mode 100644
index 9b46665..0000000
--- a/thrift_tests.py
+++ /dev/null
@@ -1,2679 +0,0 @@
-import re
-import struct
-import time
-import uuid
-from unittest import skipIf
-
-from thrift.protocol import TBinaryProtocol
-from thrift.Thrift import TApplicationException
-from thrift.transport import TSocket, TTransport
-
-from tools.assertions import assert_length_equal
-from dtest import (CASSANDRA_VERSION_FROM_BUILD, DISABLE_VNODES, NUM_TOKENS,
-                   ReusableClusterTester, debug, init_default_config)
-from thrift_bindings.v22 import Cassandra
-from thrift_bindings.v22.Cassandra import (CfDef, Column, ColumnDef,
-                                           ColumnOrSuperColumn, ColumnParent,
-                                           ColumnPath, ColumnSlice,
-                                           ConsistencyLevel, CounterColumn,
-                                           Deletion, IndexExpression,
-                                           IndexOperator, IndexType,
-                                           InvalidRequestException, KeyRange,
-                                           KeySlice, KsDef, MultiSliceRequest,
-                                           Mutation, NotFoundException,
-                                           SlicePredicate, SliceRange,
-                                           SuperColumn)
-from tools.assertions import assert_all, assert_none, assert_one
-from tools.decorators import since
-
-
-def get_thrift_client(host='127.0.0.1', port=9160):
-    socket = TSocket.TSocket(host, port)
-    transport = TTransport.TFramedTransport(socket)
-    protocol = TBinaryProtocol.TBinaryProtocol(transport)
-    client = Cassandra.Client(protocol)
-    client.transport = transport
-    return client
-
-
-client = None
-
-pid_fname = "system_test.pid"
-
-
-def pid():
-    return int(open(pid_fname).read())
-
-
-@since('2.0', max_version='4')
-class ThriftTester(ReusableClusterTester):
-    client = None
-    extra_args = []
-    cluster_options = {'partitioner': 'org.apache.cassandra.dht.ByteOrderedPartitioner',
-                       'start_rpc': 'true'}
-
-    @classmethod
-    def setUpClass(cls):
-        # super() needs to be used here for 'cls' to be bound to the correct class
-        super(ThriftTester, cls).setUpClass()
-
-    def setUp(self):
-        # This is called before the @since annotation has had time to take
-        # effect and we don't want to even try connecting on thrift in 4.0
-        if self.cluster.version() >= '4':
-            return
-
-        ReusableClusterTester.setUp(self)
-
-        # this is ugly, but the whole test module is written against a global client
-        global client
-        client = get_thrift_client()
-        client.transport.open()
-
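
The global-client arrangement flagged as ugly in the comment above is the sort
of thing a pytest fixture replaces; a hedged sketch of the fixture shape (not
taken from the actual replacement module):

    import pytest

    @pytest.fixture
    def thrift_client():
        client = get_thrift_client()   # helper defined earlier in this module
        client.transport.open()
        yield client                   # the test body runs here
        client.transport.close()
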
-    def tearDown(self):
-        # This is called before the @since annotation has had time to take
-        # effect and we don't want to even try connecting on thrift in 4.0
-        if self.cluster.version() >= '4':
-            return
-
-        client.transport.close()
-        ReusableClusterTester.tearDown(self)
-
-    @classmethod
-    def post_initialize_cluster(cls):
-        cluster = cls.cluster
-
-        # This is called before the @since annotation has had time to take
-        # effect and we don't want to even try connecting on thrift in 4.0
-        if cluster.version() >= '4':
-            return
-
-        cluster.populate(1)
-        node1, = cluster.nodelist()
-
-        # If vnodes are not used, we must set our own initial_token
-        # Because ccm will not set a hex token for ByteOrderedPartitioner
-        # automatically. It does not matter what token we set as we only
-        # ever use one node.
-        if DISABLE_VNODES:
-            node1.set_configuration_options(values={'initial_token': "a".encode('hex')})
-
-        cluster.start(wait_for_binary_proto=True)
-        cluster.nodelist()[0].watch_log_for("Listening for thrift clients")  # Wait for the thrift port to open
-        time.sleep(0.1)
-        cls.client = get_thrift_client()
-        cls.client.transport.open()
-        cls.define_schema()
-
-    @classmethod
-    def init_config(cls):
-        init_default_config(cls.cluster, ThriftTester.cluster_options)
-
-    @classmethod
-    def define_schema(cls):
-        keyspace1 = Cassandra.KsDef('Keyspace1', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'},
-                                    cf_defs=[
-            Cassandra.CfDef('Keyspace1', 'Standard1'),
-            Cassandra.CfDef('Keyspace1', 'Standard2'),
-            Cassandra.CfDef('Keyspace1', 'Standard3', column_metadata=[Cassandra.ColumnDef('c1', 'AsciiType'), Cassandra.ColumnDef('c2', 'AsciiType')]),
-            Cassandra.CfDef('Keyspace1', 'Standard4', column_metadata=[Cassandra.ColumnDef('c1', 'AsciiType')]),
-            Cassandra.CfDef('Keyspace1', 'StandardLong1', comparator_type='LongType'),
-            Cassandra.CfDef('Keyspace1', 'StandardInteger1', comparator_type='IntegerType'),
-            Cassandra.CfDef('Keyspace1', 'StandardComposite', comparator_type='CompositeType(AsciiType, AsciiType)'),
-            Cassandra.CfDef('Keyspace1', 'Super1', column_type='Super', subcomparator_type='LongType'),
-            Cassandra.CfDef('Keyspace1', 'Super2', column_type='Super', subcomparator_type='LongType'),
-            Cassandra.CfDef('Keyspace1', 'Super3', column_type='Super', comparator_type='LongType', subcomparator_type='UTF8Type'),
-            Cassandra.CfDef('Keyspace1', 'Counter1', default_validation_class='CounterColumnType'),
-            Cassandra.CfDef('Keyspace1', 'SuperCounter1', column_type='Super', default_validation_class='CounterColumnType'),
-            Cassandra.CfDef('Keyspace1', 'Indexed1', column_metadata=[Cassandra.ColumnDef('birthdate', 'LongType', Cassandra.IndexType.KEYS, 'birthdate_index')]),
-            Cassandra.CfDef('Keyspace1', 'Indexed2', comparator_type='TimeUUIDType', column_metadata=[Cassandra.ColumnDef(uuid.UUID('00000000-0000-1000-0000-000000000000').bytes, 'LongType', Cassandra.IndexType.KEYS)]),
-            Cassandra.CfDef('Keyspace1', 'Indexed3', comparator_type='TimeUUIDType', column_metadata=[Cassandra.ColumnDef(uuid.UUID('00000000-0000-1000-0000-000000000000').bytes, 'UTF8Type', Cassandra.IndexType.KEYS)]),
-            Cassandra.CfDef('Keyspace1', 'Indexed4', column_metadata=[Cassandra.ColumnDef('a', 'LongType', Cassandra.IndexType.KEYS, 'a_index'), Cassandra.ColumnDef('z', 'UTF8Type')]),
-            Cassandra.CfDef('Keyspace1', 'Expiring', default_time_to_live=2)
-        ])
-
-        keyspace2 = Cassandra.KsDef('Keyspace2', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'},
-                                    cf_defs=[
-                                        Cassandra.CfDef('Keyspace2', 'Standard1'),
-                                        Cassandra.CfDef('Keyspace2', 'Standard3'),
-                                        Cassandra.CfDef('Keyspace2', 'Super3', column_type='Super', subcomparator_type='BytesType'),
-                                        Cassandra.CfDef('Keyspace2', 'Super4', column_type='Super', subcomparator_type='TimeUUIDType'), ])
-
-        for ks in [keyspace1, keyspace2]:
-            cls.client.system_add_keyspace(ks)
-
-
-def i64(n):
-    return _i64(n)
-
-
-def i32(n):
-    return _i32(n)
-
-
-def i16(n):
-    return _i16(n)
-
-
-def composite(item1, item2=None, eoc='\x00'):
-    packed = _i16(len(item1)) + item1 + eoc
-    if item2 is not None:
-        packed += _i16(len(item2)) + item2
-        packed += eoc
-    return packed
-
-
-def _i64(n):
-    return struct.pack('>q', n)  # big endian = network order
-
-
-def _i32(n):
-    return struct.pack('>i', n)  # big endian = network order
-
-
-def _i16(n):
-    return struct.pack('>h', n)  # big endian = network order
-
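
For reference, the pack helpers above follow standard struct semantics:
fixed-width, big-endian. Under Python 3, struct.pack returns bytes rather than
str, which is one of the details the rewritten module has to account for:

    import struct

    assert struct.pack('>h', 1) == b'\x00\x01'            # i16: 2 bytes
    assert struct.pack('>i', 1) == b'\x00\x00\x00\x01'    # i32: 4 bytes
    assert struct.pack('>q', 1) == b'\x00' * 7 + b'\x01'  # i64: 8 bytes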
-
-_SIMPLE_COLUMNS = [Column('c1', 'value1', 0),
-                   Column('c2', 'value2', 0)]
-_SUPER_COLUMNS = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
-                  SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 0),
-                                                   Column(_i64(6), 'value6', 0)])]
-
-
-def _assert_column(column_family, key, column, value, ts=0):
-    try:
-        assert client.get(key, ColumnPath(column_family, column=column), ConsistencyLevel.ONE).column == Column(column, value, ts)
-    except NotFoundException:
-        raise Exception('expected %s:%s:%s:%s, but was not present' % (column_family, key, column, value))
-
-
-def _assert_columnpath_exists(key, column_path):
-    try:
-        assert client.get(key, column_path, ConsistencyLevel.ONE)
-    except NotFoundException:
-        raise Exception('expected %s with %s but was not present.' % (key, column_path))
-
-
-def _assert_no_columnpath(key, column_path):
-    try:
-        client.get(key, column_path, ConsistencyLevel.ONE)
-        assert False, ('columnpath %s existed in %s when it should not' % (column_path, key))
-    except NotFoundException:
-        assert True, 'column did not exist'
-
-
-def _insert_simple():
-    return _insert_multi(['key1'])
-
-
-def _insert_multi(keys):
-    CL = ConsistencyLevel.ONE
-    for key in keys:
-        client.insert(key, ColumnParent('Standard1'), Column('c1', 'value1', 0), CL)
-        client.insert(key, ColumnParent('Standard1'), Column('c2', 'value2', 0), CL)
-
-
-def _insert_batch():
-    cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS],
-             'Standard2': [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]}
-    client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
-
-
-def _big_slice(key, column_parent):
-    p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
-    return client.get_slice(key, column_parent, p, ConsistencyLevel.ONE)
-
-
-def _big_multislice(keys, column_parent):
-    p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
-    return client.multiget_slice(keys, column_parent, p, ConsistencyLevel.ONE)
-
-
-def _verify_batch():
-    _verify_simple()
-    L = [result.column
-         for result in _big_slice('key1', ColumnParent('Standard2'))]
-    assert L == _SIMPLE_COLUMNS, L
-
-
-def _verify_simple():
-    assert client.get('key1', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE).column == Column('c1', 'value1', 0)
-    L = [result.column
-         for result in _big_slice('key1', ColumnParent('Standard1'))]
-    assert L == _SIMPLE_COLUMNS, L
-
-
-def _insert_super(key='key1'):
-    client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
-    client.insert(key, ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
-    client.insert(key, ColumnParent('Super1', 'sc2'), Column(_i64(6), 'value6', 0), ConsistencyLevel.ONE)
-
-
-def _insert_range():
-    client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
-    client.insert('key1', ColumnParent('Standard1'), Column('c2', 'value2', 0), ConsistencyLevel.ONE)
-    client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
-
-
-def _verify_range():
-    p = SlicePredicate(slice_range=SliceRange('c1', 'c2', False, 1000))
-    result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
-    assert len(result) == 2
-    assert result[0].column.name == 'c1'
-    assert result[1].column.name == 'c2'
-
-    p = SlicePredicate(slice_range=SliceRange('c3', 'c2', True, 1000))
-    result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
-    assert len(result) == 2
-    assert result[0].column.name == 'c3'
-    assert result[1].column.name == 'c2'
-
-    p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 1000))
-    result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
-    assert len(result) == 3, result
-
-    p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 2))
-    result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
-    assert len(result) == 2, result
-
-
-def _set_keyspace(keyspace):
-    client.set_keyspace(keyspace)
-
-
-def _insert_super_range():
-    client.insert('key1', ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
-    client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
-    client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(6), 'value6', 0), ConsistencyLevel.ONE)
-    client.insert('key1', ColumnParent('Super1', 'sc3'), Column(_i64(7), 'value7', 0), ConsistencyLevel.ONE)
-    time.sleep(0.1)
-
-
-def _verify_super_range():
-    p = SlicePredicate(slice_range=SliceRange('sc2', 'sc3', False, 2))
-    result = client.get_slice('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE)
-    assert len(result) == 2
-    assert result[0].super_column.name == 'sc2'
-    assert result[1].super_column.name == 'sc3'
-
-    p = SlicePredicate(slice_range=SliceRange('sc3', 'sc2', True, 2))
-    result = client.get_slice('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE)
-    assert len(result) == 2
-    assert result[0].super_column.name == 'sc3'
-    assert result[1].super_column.name == 'sc2'
-
-
-def _verify_super(supercf='Super1', key='key1'):
-    assert client.get(key, ColumnPath(supercf, 'sc1', _i64(4)), ConsistencyLevel.ONE).column == Column(_i64(4), 'value4', 0)
-    slice = [result.super_column
-             for result in _big_slice(key, ColumnParent('Super1'))]
-    assert slice == _SUPER_COLUMNS, slice
-
-
-def _expect_exception(fn, type_):
-    try:
-        r = fn()
-    except type_ as t:
-        return t
-    else:
-        raise Exception('expected %s; got %s' % (type_.__name__, r))
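
Under pytest, this hand-rolled helper maps directly onto pytest.raises; an
equivalent sketch (presumably close to what the replacement thrift_test.py
uses, though that is an assumption):

    import pytest

    def expect_exception(fn, type_):
        # pytest.raises yields an ExceptionInfo; .value is the raised exception
        with pytest.raises(type_) as excinfo:
            fn()
        return excinfo.value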
-
-
-def _expect_missing(fn):
-    _expect_exception(fn, NotFoundException)
-
-
-def get_range_slice(client, parent, predicate, start, end, count, cl, row_filter=None):
-    kr = KeyRange(start, end, count=count, row_filter=row_filter)
-    return client.get_range_slices(parent, predicate, kr, cl)
-
-
-def _insert_six_columns(key='abc'):
-    CL = ConsistencyLevel.ONE
-    client.insert(key, ColumnParent('Standard1'), Column('a', '1', 0), CL)
-    client.insert(key, ColumnParent('Standard1'), Column('b', '2', 0), CL)
-    client.insert(key, ColumnParent('Standard1'), Column('c', '3', 0), CL)
-    client.insert(key, ColumnParent('Standard1'), Column('d', '4', 0), CL)
-    client.insert(key, ColumnParent('Standard1'), Column('e', '5', 0), CL)
-    client.insert(key, ColumnParent('Standard1'), Column('f', '6', 0), CL)
-
-
-def _big_multi_slice(key='abc'):
-    c1 = ColumnSlice()
-    c1.start = 'a'
-    c1.finish = 'c'
-    c2 = ColumnSlice()
-    c2.start = 'e'
-    c2.finish = 'f'
-    m = MultiSliceRequest()
-    m.key = key
-    m.column_parent = ColumnParent('Standard1')
-    m.column_slices = [c1, c2]
-    m.reversed = False
-    m.count = 10
-    m.consistency_level = ConsistencyLevel.ONE
-    return client.get_multi_slice(m)
-
-
-_MULTI_SLICE_COLUMNS = [Column('a', '1', 0), Column('b', '2', 0), Column('c', '3', 0), Column('e', '5', 0), Column('f', '6', 0)]
-
-
-@since('2.0', max_version='4')
-class TestMutations(ThriftTester):
-
-    def truncate_all(self, *table_names):
-        for table in table_names:
-            client.truncate(table)
-
-    def test_insert(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1')
-        _insert_simple()
-        _verify_simple()
-
-    def test_empty_slice(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard2', 'Super1')
-        assert _big_slice('key1', ColumnParent('Standard2')) == []
-        assert _big_slice('key1', ColumnParent('Super1')) == []
-
-    def test_cas(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1', 'Standard3', 'Standard4')
-
-        def cas(expected, updates, column_family):
-            return client.cas('key1', column_family, expected, updates, ConsistencyLevel.SERIAL, ConsistencyLevel.QUORUM)
-
-        def test_cas_operations(first_columns, second_columns, column_family):
-            # partition should be empty, so cas expecting any existing values should fail
-            cas_result = cas(first_columns, first_columns, column_family)
-            assert not cas_result.success
-            assert len(cas_result.current_values) == 0, cas_result
-
-            # cas of empty columns -> first_columns should succeed
-            # and the reading back from the table should match first_columns
-            assert cas([], first_columns, column_family).success
-            result = [cosc.column for cosc in _big_slice('key1', ColumnParent(column_family))]
-            # CAS will use its own timestamp, so we can't just compare result == _SIMPLE_COLUMNS
-            assert dict((c.name, c.value) for c in result) == dict((ex.name, ex.value) for ex in first_columns)
-
-            # now that the partition has been updated, repeating the
-            # operation which expects it to be empty should not succeed
-            cas_result = cas([], first_columns, column_family)
-            assert not cas_result.success
-            # When we CAS for non-existence, current_values is the first live column of the row
-            assert dict((c.name, c.value) for c in cas_result.current_values) == {first_columns[0].name: first_columns[0].value}, cas_result
-
-            # CL.SERIAL for reads
-            assert client.get('key1', ColumnPath(column_family, column=first_columns[0].name), ConsistencyLevel.SERIAL).column.value == first_columns[0].value
-
-            # cas first_columns -> second_columns should succeed
-            assert cas(first_columns, second_columns, column_family).success
-
-            # as before, an operation with an incorrect expectation should fail
-            cas_result = cas(first_columns, second_columns, column_family)
-            assert not cas_result.success
-
-        updated_columns = [Column('c1', 'value101', 1),
-                           Column('c2', 'value102', 1)]
-
-        debug("Testing CAS operations on dynamic cf")
-        test_cas_operations(_SIMPLE_COLUMNS, updated_columns, 'Standard1')
-        debug("Testing CAS operations on static cf")
-        test_cas_operations(_SIMPLE_COLUMNS, updated_columns, 'Standard3')
-        debug("Testing CAS on mixed static/dynamic cf")
-        test_cas_operations(_SIMPLE_COLUMNS, updated_columns, 'Standard4')
-
-    def test_missing_super(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Super1')
-
-        _expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc1', _i64(1)), ConsistencyLevel.ONE))
-        _insert_super()
-        _expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc1', _i64(1)), ConsistencyLevel.ONE))
-
-    def test_count(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1', 'Standard2', 'Super1')
-
-        _insert_simple()
-        _insert_super()
-        p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
-        assert client.get_count('key1', ColumnParent('Standard2'), p, ConsistencyLevel.ONE) == 0
-        assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 2
-        assert client.get_count('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE) == 2
-        assert client.get_count('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE) == 2
-
-        # Let's make that a little more interesting
-        client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
-        client.insert('key1', ColumnParent('Standard1'), Column('c4', 'value4', 0), ConsistencyLevel.ONE)
-        client.insert('key1', ColumnParent('Standard1'), Column('c5', 'value5', 0), ConsistencyLevel.ONE)
-
-        p = SlicePredicate(slice_range=SliceRange('c2', 'c4', False, 1000))
-        assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 3
-
-    def test_count_paging(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1')
-
-        _insert_simple()
-
-        # Exercise paging
-        column_parent = ColumnParent('Standard1')
-        # Paging for small columns starts at 1024 columns
-        columns_to_insert = [Column('c%d' % (i,), 'value%d' % (i,), 0) for i in xrange(3, 1026)]
-        cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(c)) for c in columns_to_insert]}
-        client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
-
-        p = SlicePredicate(slice_range=SliceRange('', '', False, 2000))
-        assert client.get_count('key1', column_parent, p, ConsistencyLevel.ONE) == 1025
-
-        # Ensure that the count limit isn't clobbered
-        p = SlicePredicate(slice_range=SliceRange('', '', False, 10))
-        assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 10
-
-    # test get_count() to work correctly with 'count' settings around page size (CASSANDRA-4833)
-    def test_count_around_page_size(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1')
-
-        def slice_predicate(count):
-            return SlicePredicate(slice_range=SliceRange('', '', False, count))
-
-        key = 'key1'
-        parent = ColumnParent('Standard1')
-        cl = ConsistencyLevel.ONE
-
-        for i in xrange(0, 3050):
-            client.insert(key, parent, Column(str(i), '', 0), cl)
-
-        # same as page size
-        assert client.get_count(key, parent, slice_predicate(1024), cl) == 1024
-
-        # 1 above page size
-        assert client.get_count(key, parent, slice_predicate(1025), cl) == 1025
-
-        # above number or columns
-        assert client.get_count(key, parent, slice_predicate(4000), cl) == 3050
-
-        # same as number of columns
-        assert client.get_count(key, parent, slice_predicate(3050), cl) == 3050
-
-        # 1 above number of columns
-        assert client.get_count(key, parent, slice_predicate(3051), cl) == 3050
-
-    def test_super_insert(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Super1')
-
-        _insert_super()
-        _verify_super()
-
-    def test_super_get(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Super1')
-
-        _insert_super()
-        result = client.get('key1', ColumnPath('Super1', 'sc2'), ConsistencyLevel.ONE).super_column
-        assert result == _SUPER_COLUMNS[1], result
-
-    def test_super_subcolumn_limit(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Super1')
-        _insert_super()
-        p = SlicePredicate(slice_range=SliceRange('', '', False, 1))
-        column_parent = ColumnParent('Super1', 'sc2')
-        slice = [result.column
-                 for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
-        assert slice == [Column(_i64(5), 'value5', 0)], slice
-        p = SlicePredicate(slice_range=SliceRange('', '', True, 1))
-        slice = [result.column
-                 for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
-        assert slice == [Column(_i64(6), 'value6', 0)], slice
-
-    def test_long_order(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('StandardLong1')
-
-        def long_xrange(start, stop, step):
-            i = start
-            while i < stop:
-                yield i
-                i += step
-        L = []
-        for i in long_xrange(0, 104294967296, 429496729):
-            name = _i64(i)
-            client.insert('key1', ColumnParent('StandardLong1'), Column(name, 'v', 0), ConsistencyLevel.ONE)
-            L.append(name)
-        slice = [result.column.name for result in _big_slice('key1', ColumnParent('StandardLong1'))]
-        assert slice == L, slice
-
-    def test_integer_order(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('StandardInteger1')
-
-        def long_xrange(start, stop, step):
-            i = start
-            while i >= stop:
-                yield i
-                i -= step
-        L = []
-        for i in long_xrange(104294967296, 0, 429496729):
-            name = _i64(i)
-            client.insert('key1', ColumnParent('StandardInteger1'), Column(name, 'v', 0), ConsistencyLevel.ONE)
-            L.append(name)
-        slice = [result.column.name for result in _big_slice('key1', ColumnParent('StandardInteger1'))]
-        L.sort()
-        assert slice == L, slice
-
-    def test_time_uuid(self):
-        _set_keyspace('Keyspace2')
-        self.truncate_all('Super4')
-
-        import uuid
-        L = []
-
-        # 100 isn't enough to fail reliably if the comparator is borked
-        for i in xrange(500):
-            L.append(uuid.uuid1())
-            client.insert('key1', ColumnParent('Super4', 'sc1'), Column(L[-1].bytes, 'value%s' % i, i), ConsistencyLevel.ONE)
-        slice = _big_slice('key1', ColumnParent('Super4', 'sc1'))
-        assert len(slice) == 500, len(slice)
-        for i in xrange(500):
-            u = slice[i].column
-            assert u.value == 'value%s' % i
-            assert u.name == L[i].bytes
-
-        p = SlicePredicate(slice_range=SliceRange('', '', True, 1))
-        column_parent = ColumnParent('Super4', 'sc1')
-        slice = [result.column
-                 for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
-        assert slice == [Column(L[-1].bytes, 'value499', 499)], slice
-
-        p = SlicePredicate(slice_range=SliceRange('', L[2].bytes, False, 1000))
-        column_parent = ColumnParent('Super4', 'sc1')
-        slice = [result.column
-                 for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
-        assert slice == [Column(L[0].bytes, 'value0', 0),
-                         Column(L[1].bytes, 'value1', 1),
-                         Column(L[2].bytes, 'value2', 2)], slice
-
-        p = SlicePredicate(slice_range=SliceRange(L[2].bytes, '', True, 1000))
-        column_parent = ColumnParent('Super4', 'sc1')
-        slice = [result.column
-                 for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
-        assert slice == [Column(L[2].bytes, 'value2', 2),
-                         Column(L[1].bytes, 'value1', 1),
-                         Column(L[0].bytes, 'value0', 0)], slice
-
-        p = SlicePredicate(slice_range=SliceRange(L[2].bytes, '', False, 1))
-        column_parent = ColumnParent('Super4', 'sc1')
-        slice = [result.column
-                 for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
-        assert slice == [Column(L[2].bytes, 'value2', 2)], slice
-
-    def test_long_remove(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('StandardLong1')
-
-        column_parent = ColumnParent('StandardLong1')
-        sp = SlicePredicate(slice_range=SliceRange('', '', False, 1))
-        for i in xrange(10):
-            parent = ColumnParent('StandardLong1')
-
-            client.insert('key1', parent, Column(_i64(i), 'value1', 10 * i), ConsistencyLevel.ONE)
-            client.remove('key1', ColumnPath('StandardLong1'), 10 * i + 1, ConsistencyLevel.ONE)
-            slice = client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)
-            assert slice == [], slice
-            # resurrect
-            client.insert('key1', parent, Column(_i64(i), 'value2', 10 * i + 2), ConsistencyLevel.ONE)
-            slice = [result.column
-                     for result in client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)]
-            assert slice == [Column(_i64(i), 'value2', 10 * i + 2)], (slice, i)
-
-    def test_integer_remove(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('StandardInteger1')
-
-        column_parent = ColumnParent('StandardInteger1')
-        sp = SlicePredicate(slice_range=SliceRange('', '', False, 1))
-        for i in xrange(10):
-            parent = ColumnParent('StandardInteger1')
-
-            client.insert('key1', parent, Column(_i64(i), 'value1', 10 * i), ConsistencyLevel.ONE)
-            client.remove('key1', ColumnPath('StandardInteger1'), 10 * i + 1, ConsistencyLevel.ONE)
-            slice = client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)
-            assert slice == [], slice
-            # resurrect
-            client.insert('key1', parent, Column(_i64(i), 'value2', 10 * i + 2), ConsistencyLevel.ONE)
-            slice = [result.column
-                     for result in client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)]
-            assert slice == [Column(_i64(i), 'value2', 10 * i + 2)], (slice, i)
-
-    def test_batch_insert(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1', 'Standard2')
-        _insert_batch()
-        _verify_batch()
-
-    def test_batch_mutate_standard_columns(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1', 'Standard2')
-
-        column_families = ['Standard1', 'Standard2']
-        keys = ['key_%d' % i for i in range(27, 32)]
-        mutations = [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]
-        mutation_map = dict((column_family, mutations) for column_family in column_families)
-        keyed_mutations = dict((key, mutation_map) for key in keys)
-
-        client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
-
-        for column_family in column_families:
-            for key in keys:
-                _assert_column(column_family, key, 'c1', 'value1')
-
-    def test_batch_mutate_remove_standard_columns(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1', 'Standard2')
-
-        column_families = ['Standard1', 'Standard2']
-        keys = ['key_%d' % i for i in range(11, 21)]
-        _insert_multi(keys)
-
-        mutations = [Mutation(deletion=Deletion(20, predicate=SlicePredicate(column_names=[c.name]))) for c in _SIMPLE_COLUMNS]
-        mutation_map = dict((column_family, mutations) for column_family in column_families)
-
-        keyed_mutations = dict((key, mutation_map) for key in keys)
-
-        client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
-
-        for column_family in column_families:
-            for c in _SIMPLE_COLUMNS:
-                for key in keys:
-                    _assert_no_columnpath(key, ColumnPath(column_family, column=c.name))
-
-    def test_batch_mutate_remove_standard_row(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1', 'Standard2')
-
-        column_families = ['Standard1', 'Standard2']
-        keys = ['key_%d' % i for i in range(11, 21)]
-        _insert_multi(keys)
-
-        mutations = [Mutation(deletion=Deletion(20))]
-        mutation_map = dict((column_family, mutations) for column_family in column_families)
-
-        keyed_mutations = dict((key, mutation_map) for key in keys)
-
-        client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
-
-        for column_family in column_families:
-            for c in _SIMPLE_COLUMNS:
-                for key in keys:
-                    _assert_no_columnpath(key, ColumnPath(column_family, column=c.name))
-
-    def test_batch_mutate_remove_super_columns_with_standard_under(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Super1', 'Super2')
-
-        column_families = ['Super1', 'Super2']
-        keys = ['key_%d' % i for i in range(11, 21)]
-        _insert_super()
-
-        mutations = []
-        for sc in _SUPER_COLUMNS:
-            names = []
-            for c in sc.columns:
-                names.append(c.name)
-            mutations.append(Mutation(deletion=Deletion(20, super_column=c.name, predicate=SlicePredicate(column_names=names))))
-
-        mutation_map = dict((column_family, mutations) for column_family in column_families)
-
-        keyed_mutations = dict((key, mutation_map) for key in keys)
-
-        client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
-        for column_family in column_families:
-            for sc in _SUPER_COLUMNS:
-                for c in sc.columns:
-                    for key in keys:
-                        _assert_no_columnpath(key, ColumnPath(column_family, super_column=sc.name, column=c.name))
-
-    def test_batch_mutate_remove_super_columns_with_none_given_underneath(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Super1')
-
-        keys = ['key_%d' % i for i in range(17, 21)]
-
-        for key in keys:
-            _insert_super(key)
-
-        mutations = []
-
-        for sc in _SUPER_COLUMNS:
-            mutations.append(Mutation(deletion=Deletion(20,
-                                                        super_column=sc.name)))
-
-        mutation_map = {'Super1': mutations}
-
-        keyed_mutations = dict((key, mutation_map) for key in keys)
-
-        # Sanity check
-        for sc in _SUPER_COLUMNS:
-            for key in keys:
-                _assert_columnpath_exists(key, ColumnPath('Super1', super_column=sc.name))
-
-        client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
-
-        for sc in _SUPER_COLUMNS:
-            for c in sc.columns:
-                for key in keys:
-                    _assert_no_columnpath(key, ColumnPath('Super1', super_column=sc.name))
-
-    def test_batch_mutate_remove_super_columns_entire_row(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Super1')
-
-        keys = ['key_%d' % i for i in range(17, 21)]
-
-        for key in keys:
-            _insert_super(key)
-
-        mutations = []
-
-        mutations.append(Mutation(deletion=Deletion(20)))
-
-        mutation_map = {'Super1': mutations}
-
-        keyed_mutations = dict((key, mutation_map) for key in keys)
-
-        # Sanity check
-        for sc in _SUPER_COLUMNS:
-            for key in keys:
-                _assert_columnpath_exists(key, ColumnPath('Super1', super_column=sc.name))
-
-        client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
-
-        for sc in _SUPER_COLUMNS:
-            for key in keys:
-                _assert_no_columnpath(key, ColumnPath('Super1', super_column=sc.name))
-
-    # known failure: see CASSANDRA-10046
-    def test_batch_mutate_remove_slice_standard(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1')
-
-        columns = [Column('c1', 'value1', 0),
-                   Column('c2', 'value2', 0),
-                   Column('c3', 'value3', 0),
-                   Column('c4', 'value4', 0),
-                   Column('c5', 'value5', 0)]
-
-        for column in columns:
-            client.insert('key', ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
-
-        d = Deletion(1, predicate=SlicePredicate(slice_range=SliceRange(start='c2', finish='c4')))
-        client.batch_mutate({'key': {'Standard1': [Mutation(deletion=d)]}}, ConsistencyLevel.ONE)
-
-        _assert_columnpath_exists('key', ColumnPath('Standard1', column='c1'))
-        _assert_no_columnpath('key', ColumnPath('Standard1', column='c2'))
-        _assert_no_columnpath('key', ColumnPath('Standard1', column='c3'))
-        _assert_no_columnpath('key', ColumnPath('Standard1', column='c4'))
-        _assert_columnpath_exists('key', ColumnPath('Standard1', column='c5'))
-
-    # known failure: see CASSANDRA-10046
-    def test_batch_mutate_remove_slice_of_entire_supercolumns(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Super1')
-
-        columns = [SuperColumn(name='sc1', columns=[Column(_i64(1), 'value1', 0)]),
-                   SuperColumn(name='sc2',
-                               columns=[Column(_i64(2), 'value2', 0), Column(_i64(3), 'value3', 0)]),
-                   SuperColumn(name='sc3', columns=[Column(_i64(4), 'value4', 0)]),
-                   SuperColumn(name='sc4',
-                               columns=[Column(_i64(5), 'value5', 0), Column(_i64(6), 'value6', 0)]),
-                   SuperColumn(name='sc5', columns=[Column(_i64(7), 'value7', 0)])]
-
-        for column in columns:
-            for subcolumn in column.columns:
-                client.insert('key', ColumnParent('Super1', column.name), subcolumn, ConsistencyLevel.ONE)
-
-        d = Deletion(1, predicate=SlicePredicate(slice_range=SliceRange(start='sc2', finish='sc4')))
-        client.batch_mutate({'key': {'Super1': [Mutation(deletion=d)]}}, ConsistencyLevel.ONE)
-
-        _assert_columnpath_exists('key', ColumnPath('Super1', super_column='sc1', column=_i64(1)))
-        _assert_no_columnpath('key', ColumnPath('Super1', super_column='sc2', column=_i64(2)))
-        _assert_no_columnpath('key', ColumnPath('Super1', super_column='sc2', column=_i64(3)))
-        _assert_no_columnpath('key', ColumnPath('Super1', super_column='sc3', column=_i64(4)))
-        _assert_no_columnpath('key', ColumnPath('Super1', super_column='sc4', column=_i64(5)))
-        _assert_no_columnpath('key', ColumnPath('Super1', super_column='sc4', column=_i64(6)))
-        _assert_columnpath_exists('key', ColumnPath('Super1', super_column='sc5', column=_i64(7)))
-
-    @since('1.0', '2.2')
-    def test_batch_mutate_remove_slice_part_of_supercolumns(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Super1')
-
-        columns = [Column(_i64(1), 'value1', 0),
-                   Column(_i64(2), 'value2', 0),
-                   Column(_i64(3), 'value3', 0),
-                   Column(_i64(4), 'value4', 0),
-                   Column(_i64(5), 'value5', 0)]
-
-        for column in columns:
-            client.insert('key', ColumnParent('Super1', 'sc1'), column, ConsistencyLevel.ONE)
-
-        r = SliceRange(start=_i64(2), finish=_i64(4))
-        d = Deletion(1, super_column='sc1', predicate=SlicePredicate(slice_range=r))
-        client.batch_mutate({'key': {'Super1': [Mutation(deletion=d)]}}, ConsistencyLevel.ONE)
-
-        _assert_columnpath_exists('key', ColumnPath('Super1', super_column='sc1', column=_i64(1)))
-        _assert_no_columnpath('key', ColumnPath('Super1', super_column='sc1', column=_i64(2)))
-        _assert_no_columnpath('key', ColumnPath('Super1', super_column='sc1', column=_i64(3)))
-        _assert_no_columnpath('key', ColumnPath('Super1', super_column='sc1', column=_i64(4)))
-        _assert_columnpath_exists('key', ColumnPath('Super1', super_column='sc1', column=_i64(5)))
-
-    def test_batch_mutate_insertions_and_deletions(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Super1', 'Super2')
-
-        first_insert = SuperColumn("sc1",
-                                   columns=[Column(_i64(20), 'value20', 3),
-                                            Column(_i64(21), 'value21', 3)])
-        second_insert = SuperColumn("sc1",
-                                    columns=[Column(_i64(20), 'value20', 3),
-                                             Column(_i64(21), 'value21', 3)])
-        first_deletion = {'super_column': "sc1",
-                          'predicate': SlicePredicate(column_names=[_i64(22), _i64(23)])}
-        second_deletion = {'super_column': "sc2",
-                           'predicate': SlicePredicate(column_names=[_i64(22), _i64(23)])}
-
-        keys = ['key_30', 'key_31']
-        for key in keys:
-            sc = SuperColumn('sc1', [Column(_i64(22), 'value22', 0),
-                                     Column(_i64(23), 'value23', 0)])
-            cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=sc))]}
-            client.batch_mutate({key: cfmap}, ConsistencyLevel.ONE)
-
-            sc2 = SuperColumn('sc2', [Column(_i64(22), 'value22', 0),
-                                      Column(_i64(23), 'value23', 0)])
-            cfmap2 = {'Super2': [Mutation(ColumnOrSuperColumn(super_column=sc2))]}
-            client.batch_mutate({key: cfmap2}, ConsistencyLevel.ONE)
-
-        cfmap3 = {
-            'Super1': [Mutation(ColumnOrSuperColumn(super_column=first_insert)),
-                       Mutation(deletion=Deletion(3, **first_deletion))],
-
-            'Super2': [Mutation(deletion=Deletion(2, **second_deletion)),
-                       Mutation(ColumnOrSuperColumn(super_column=second_insert))]
-        }
-
-        keyed_mutations = dict((key, cfmap3) for key in keys)
-        client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
-
-        for key in keys:
-            for c in [_i64(22), _i64(23)]:
-                _assert_no_columnpath(key, ColumnPath('Super1', super_column='sc1', column=c))
-                _assert_no_columnpath(key, ColumnPath('Super2', super_column='sc2', column=c))
-
-            for c in [_i64(20), _i64(21)]:
-                _assert_columnpath_exists(key, ColumnPath('Super1', super_column='sc1', column=c))
-                _assert_columnpath_exists(key, ColumnPath('Super2', super_column='sc1', column=c))
-
-    def test_bad_system_calls(self):
-        def duplicate_index_names():
-            _set_keyspace('Keyspace1')
-            cd1 = ColumnDef('foo', 'BytesType', IndexType.KEYS, 'i')
-            cd2 = ColumnDef('bar', 'BytesType', IndexType.KEYS, 'i')
-            cf = CfDef('Keyspace1', 'BadCF', column_metadata=[cd1, cd2])
-            client.system_add_column_family(cf)
-        _expect_exception(duplicate_index_names, InvalidRequestException)
-
-    def test_bad_batch_calls(self):
-        # mutate_does_not_accept_cosc_and_deletion_in_same_mutation
-        def too_full():
-            _set_keyspace('Keyspace1')
-            col = ColumnOrSuperColumn(column=Column("foo", 'bar', 0))
-            dele = Deletion(2, predicate=SlicePredicate(column_names=['baz']))
-            client.batch_mutate({'key_34': {'Standard1': [Mutation(col, dele)]}},
-                                ConsistencyLevel.ONE)
-        _expect_exception(too_full, InvalidRequestException)
-
-        # test_batch_mutate_does_not_accept_cosc_on_undefined_cf:
-        def bad_cf():
-            _set_keyspace('Keyspace1')
-            col = ColumnOrSuperColumn(column=Column("foo", 'bar', 0))
-            client.batch_mutate({'key_36': {'Undefined': [Mutation(col)]}},
-                                ConsistencyLevel.ONE)
-        _expect_exception(bad_cf, InvalidRequestException)
-
-        # test_batch_mutate_does_not_accept_deletion_on_undefined_cf
-        def bad_cf_2():
-            _set_keyspace('Keyspace1')
-            d = Deletion(2, predicate=SlicePredicate(column_names=['baz']))
-            client.batch_mutate({'key_37': {'Undefined': [Mutation(deletion=d)]}},
-                                ConsistencyLevel.ONE)
-        _expect_exception(bad_cf_2, InvalidRequestException)
-
-        # a column value that does not match the declared validator
-        def send_string_instead_of_long():
-            _set_keyspace('Keyspace1')
-            col = ColumnOrSuperColumn(column=Column('birthdate', 'bar', 0))
-            client.batch_mutate({'key_38': {'Indexed1': [Mutation(col)]}},
-                                ConsistencyLevel.ONE)
-        _expect_exception(send_string_instead_of_long, InvalidRequestException)
-
-    def test_column_name_lengths(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1')
-
-        _expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), Column('', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
-        client.insert('key1', ColumnParent('Standard1'), Column('x' * 1, 'value', 0), ConsistencyLevel.ONE)
-        client.insert('key1', ColumnParent('Standard1'), Column('x' * 127, 'value', 0), ConsistencyLevel.ONE)
-        client.insert('key1', ColumnParent('Standard1'), Column('x' * 128, 'value', 0), ConsistencyLevel.ONE)
-        client.insert('key1', ColumnParent('Standard1'), Column('x' * 129, 'value', 0), ConsistencyLevel.ONE)
-        client.insert('key1', ColumnParent('Standard1'), Column('x' * 255, 'value', 0), ConsistencyLevel.ONE)
-        client.insert('key1', ColumnParent('Standard1'), Column('x' * 256, 'value', 0), ConsistencyLevel.ONE)
-        client.insert('key1', ColumnParent('Standard1'), Column('x' * 257, 'value', 0), ConsistencyLevel.ONE)
-        client.insert('key1', ColumnParent('Standard1'), Column('x' * (2 ** 16 - 1), 'value', 0), ConsistencyLevel.ONE)
-        _expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), Column('x' * (2 ** 16), 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
-
-    def test_bad_calls(self):
-        _set_keyspace('Keyspace1')
-
-        # missing arguments
-        _expect_exception(lambda: client.insert(None, None, None, None), TApplicationException)
-        # supercolumn in a non-super CF
-        _expect_exception(lambda: client.insert('key1', ColumnParent('Standard1', 'x'), Column('y', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
-        # no supercolumn in a super CF
-        _expect_exception(lambda: client.insert('key1', ColumnParent('Super1'), Column('y', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
-        # column but no supercolumn in remove
-        _expect_exception(lambda: client.remove('key1', ColumnPath('Super1', column='x'), 0, ConsistencyLevel.ONE), InvalidRequestException)
-        # super column in non-super CF
-        _expect_exception(lambda: client.remove('key1', ColumnPath('Standard1', 'y', 'x'), 0, ConsistencyLevel.ONE), InvalidRequestException)
-        # key too long
-        _expect_exception(lambda: client.get('x' * 2 ** 16, ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE), InvalidRequestException)
-        # empty key
-        _expect_exception(lambda: client.get('', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE), InvalidRequestException)
-        cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c)) for c in _SUPER_COLUMNS],
-                 'Super2': [Mutation(ColumnOrSuperColumn(super_column=c)) for c in _SUPER_COLUMNS]}
-        _expect_exception(lambda: client.batch_mutate({'': cfmap}, ConsistencyLevel.ONE), InvalidRequestException)
-        # empty column name
-        _expect_exception(lambda: client.get('key1', ColumnPath('Standard1', column=''), ConsistencyLevel.ONE), InvalidRequestException)
-        # get doesn't specify column name
-        _expect_exception(lambda: client.get('key1', ColumnPath('Standard1'), ConsistencyLevel.ONE), InvalidRequestException)
-        # supercolumn in a non-super CF
-        _expect_exception(lambda: client.get('key1', ColumnPath('Standard1', 'x', 'y'), ConsistencyLevel.ONE), InvalidRequestException)
-        # get doesn't specify supercolumn name
-        _expect_exception(lambda: client.get('key1', ColumnPath('Super1'), ConsistencyLevel.ONE), InvalidRequestException)
-        # invalid CF
-        _expect_exception(lambda: get_range_slice(client, ColumnParent('S'), SlicePredicate(column_names=['', '']), '', '', 5, ConsistencyLevel.ONE), InvalidRequestException)
-        # 'x' is not a valid Long
-        _expect_exception(lambda: client.insert('key1', ColumnParent('Super1', 'sc1'), Column('x', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
-        # start is not a valid Long
-        p = SlicePredicate(slice_range=SliceRange('x', '', False, 1))
-        column_parent = ColumnParent('StandardLong1')
-        _expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
-                          InvalidRequestException)
-        # start > finish
-        p = SlicePredicate(slice_range=SliceRange(_i64(10), _i64(0), False, 1))
-        column_parent = ColumnParent('StandardLong1')
-        _expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
-                          InvalidRequestException)
-        # start is not a valid Long, supercolumn version
-        p = SlicePredicate(slice_range=SliceRange('x', '', False, 1))
-        column_parent = ColumnParent('Super1', 'sc1')
-        _expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
-                          InvalidRequestException)
-        # start > finish, supercolumn version
-        p = SlicePredicate(slice_range=SliceRange(_i64(10), _i64(0), False, 1))
-        column_parent = ColumnParent('Super1', 'sc1')
-        _expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
-                          InvalidRequestException)
-        # start > finish, key version
-        _expect_exception(lambda: get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['']), 'z', 'a', 1, ConsistencyLevel.ONE), InvalidRequestException)
-        # ttl must be greater than or equal to zero
-        column = Column('cttl1', 'value1', 0, -1)
-        _expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE),
-                          InvalidRequestException)
-        # don't allow super_column in Deletion for standard Column
-        deletion = Deletion(1, 'supercolumn', None)
-        mutation = Mutation(deletion=deletion)
-        mutations = {'key': {'Standard1': [mutation]}}
-        _expect_exception(lambda: client.batch_mutate(mutations, ConsistencyLevel.QUORUM),
-                          InvalidRequestException)
-        # 'x' is not a valid long
-        deletion = Deletion(1, 'x', None)
-        mutation = Mutation(deletion=deletion)
-        mutations = {'key': {'Super3': [mutation]}}
-        _expect_exception(lambda: client.batch_mutate(mutations, ConsistencyLevel.QUORUM), InvalidRequestException)
-        # counters don't support ANY
-        _expect_exception(lambda: client.add('key1', ColumnParent('Counter1', 'x'), CounterColumn('y', 1), ConsistencyLevel.ANY), InvalidRequestException)
-
-    def test_batch_insert_super(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Super1', 'Super2')
-
-        cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c))
-                            for c in _SUPER_COLUMNS],
-                 'Super2': [Mutation(ColumnOrSuperColumn(super_column=c))
-                            for c in _SUPER_COLUMNS]}
-        client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
-        _verify_super('Super1')
-        _verify_super('Super2')
-
-    def test_cf_remove_column(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1')
-
-        _insert_simple()
-        client.remove('key1', ColumnPath('Standard1', column='c1'), 1, ConsistencyLevel.ONE)
-        _expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE))
-        assert client.get('key1', ColumnPath('Standard1', column='c2'), ConsistencyLevel.ONE).column \
-            == Column('c2', 'value2', 0)
-        assert _big_slice('key1', ColumnParent('Standard1')) \
-            == [ColumnOrSuperColumn(column=Column('c2', 'value2', 0))]
-
-        # New insert, make sure it shows up post-remove:
-        client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
-        columns = [result.column
-                   for result in _big_slice('key1', ColumnParent('Standard1'))]
-        assert columns == [Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
-
-        # Test resurrection.  First, re-insert the value w/ older timestamp,
-        # and make sure it stays removed
-        client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
-        columns = [result.column
-                   for result in _big_slice('key1', ColumnParent('Standard1'))]
-        assert columns == [Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
-        # Next, w/ a newer timestamp; it should come back:
-        client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 2), ConsistencyLevel.ONE)
-        columns = [result.column
-                   for result in _big_slice('key1', ColumnParent('Standard1'))]
-        assert columns == [Column('c1', 'value1', 2), Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
-
-    def test_cf_remove(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1', 'Super1')
-
-        _insert_simple()
-        _insert_super()
-
-        # Remove the key1:Standard1 cf; verify super is unaffected
-        client.remove('key1', ColumnPath('Standard1'), 3, ConsistencyLevel.ONE)
-        assert _big_slice('key1', ColumnParent('Standard1')) == []
-        _verify_super()
-
-        # Test resurrection.  First, re-insert a value w/ older timestamp,
-        # and make sure it stays removed:
-        client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
-        assert _big_slice('key1', ColumnParent('Standard1')) == []
-        # Next, w/ a newer timestamp; it should come back:
-        client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 4), ConsistencyLevel.ONE)
-        result = _big_slice('key1', ColumnParent('Standard1'))
-        assert result == [ColumnOrSuperColumn(column=Column('c1', 'value1', 4))], result
-
-        # check removing the entire super cf, too.
-        client.remove('key1', ColumnPath('Super1'), 3, ConsistencyLevel.ONE)
-        assert _big_slice('key1', ColumnParent('Super1')) == []
-        assert _big_slice('key1', ColumnParent('Super1', 'sc1')) == []
-
-    def test_super_cf_remove_and_range_slice(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Super1')
-
-        client.insert('key3', ColumnParent('Super1', 'sc1'), Column(_i64(1), 'v1', 0), ConsistencyLevel.ONE)
-        client.remove('key3', ColumnPath('Super1', 'sc1'), 5, ConsistencyLevel.ONE)
-
-        rows = {}
-        for row in get_range_slice(client, ColumnParent('Super1'), SlicePredicate(slice_range=SliceRange('', '', False, 1000)), '', '', 1000, ConsistencyLevel.ONE):
-            scs = [cosc.super_column for cosc in row.columns]
-            rows[row.key] = scs
-        assert rows == {'key3': []}, rows
-
-    def test_super_cf_remove_column(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1', 'Super1')
-
-        _insert_simple()
-        _insert_super()
-
-        # Make sure remove clears out what it's supposed to, and _only_ that:
-        client.remove('key1', ColumnPath('Super1', 'sc2', _i64(5)), 5, ConsistencyLevel.ONE)
-        _expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc2', _i64(5)), ConsistencyLevel.ONE))
-        super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
-        assert super_columns == [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
-                                 SuperColumn(name='sc2', columns=[Column(_i64(6), 'value6', 0)])]
-        _verify_simple()
-
-        # New insert, make sure it shows up post-remove:
-        client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(7), 'value7', 0), ConsistencyLevel.ONE)
-        super_columns_expected = [SuperColumn(name='sc1',
-                                              columns=[Column(_i64(4), 'value4', 0)]),
-                                  SuperColumn(name='sc2',
-                                              columns=[Column(_i64(6), 'value6', 0), Column(_i64(7), 'value7', 0)])]
-
-        super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
-        assert super_columns == super_columns_expected, super_columns
-
-        # Test resurrection.  First, re-insert the value w/ older timestamp,
-        # and make sure it stays removed:
-        client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
-
-        super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
-        assert super_columns == super_columns_expected, super_columns
-
-        # Next, w/ a newer timestamp; it should come back
-        client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 6), ConsistencyLevel.ONE)
-        super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
-        super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
-                                  SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 6),
-                                                                   Column(_i64(6), 'value6', 0),
-                                                                   Column(_i64(7), 'value7', 0)])]
-        assert super_columns == super_columns_expected, super_columns
-
-        # shouldn't be able to specify a column w/o a super column for remove
-        cp = ColumnPath(column_family='Super1', column='sc2')
-        e = _expect_exception(lambda: client.remove('key1', cp, 5, ConsistencyLevel.ONE), InvalidRequestException)
-        assert e.why.find("column cannot be specified without") >= 0
-
-    def test_super_cf_remove_supercolumn(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1', 'Super1')
-
-        _insert_simple()
-        _insert_super()
-
-        # Make sure remove clears out what it's supposed to, and _only_ that:
-        client.remove('key1', ColumnPath('Super1', 'sc2'), 5, ConsistencyLevel.ONE)
-        _expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc2', _i64(5)), ConsistencyLevel.ONE))
-        super_columns = _big_slice('key1', ColumnParent('Super1', 'sc2'))
-        assert super_columns == [], super_columns
-        super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)])]
-        super_columns = [result.super_column
-                         for result in _big_slice('key1', ColumnParent('Super1'))]
-        assert super_columns == super_columns_expected, super_columns
-        _verify_simple()
-
-        # Test resurrection.  First, re-insert the value w/ older timestamp,
-        # and make sure it stays removed:
-        client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 1), ConsistencyLevel.ONE)
-        super_columns = [result.super_column
-                         for result in _big_slice('key1', ColumnParent('Super1'))]
-        assert super_columns == super_columns_expected, super_columns
-
-        # Next, w/ a newer timestamp; it should come back
-        client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 6), ConsistencyLevel.ONE)
-        super_columns = [result.super_column
-                         for result in _big_slice('key1', ColumnParent('Super1'))]
-        super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
-                                  SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 6)])]
-        assert super_columns == super_columns_expected, super_columns
-
-        # check slicing at the subcolumn level too
-        p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
-        columns = [result.column
-                   for result in client.get_slice('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE)]
-        assert columns == [Column(_i64(5), 'value5', 6)], columns
-
-    def test_super_cf_resurrect_subcolumn(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Super1')
-
-        key = 'vijay'
-        client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
-
-        client.remove(key, ColumnPath('Super1', 'sc1'), 1, ConsistencyLevel.ONE)
-
-        client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 2), ConsistencyLevel.ONE)
-
-        result = client.get(key, ColumnPath('Super1', 'sc1'), ConsistencyLevel.ONE)
-        assert result.super_column.columns is not None, result.super_column
-
-    def test_empty_range(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1', 'Super1')
-
-        assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE) == []
-        _insert_simple()
-        assert get_range_slice(client, ColumnParent('Super1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE) == []
-
-    @since('2.1')
-    def test_super_cql_read_compatibility(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Super1')
-
-        _insert_super("key1")
-        _insert_super("key2")
-
-        node1 = self.cluster.nodelist()[0]
-        session = self.patient_cql_connection(node1)
-
-        session.execute('USE "Keyspace1"')
-
-        assert_all(session, "SELECT * FROM \"Super1\"",
-                   [["key1", "sc1", 4, "value4"],
-                    ["key1", "sc2", 5, "value5"],
-                    ["key1", "sc2", 6, "value6"],
-                    ["key2", "sc1", 4, "value4"],
-                    ["key2", "sc2", 5, "value5"],
-                    ["key2", "sc2", 6, "value6"]])
-
-        assert_all(session, "SELECT * FROM \"Super1\" WHERE key=textAsBlob('key1')",
-                   [["key1", "sc1", 4, "value4"],
-                    ["key1", "sc2", 5, "value5"],
-                    ["key1", "sc2", 6, "value6"]])
-
-        assert_all(session, "SELECT * FROM \"Super1\" WHERE key=textAsBlob('key1') AND column1=textAsBlob('sc2')",
-                   [["key1", "sc2", 5, "value5"],
-                    ["key1", "sc2", 6, "value6"]])
-
-        assert_all(session, "SELECT * FROM \"Super1\" WHERE key=textAsBlob('key1') AND column1=textAsBlob('sc2') AND column2 = 5",
-                   [["key1", "sc2", 5, "value5"]])
-
-        assert_all(session, "SELECT * FROM \"Super1\" WHERE key = textAsBlob('key1') AND column1 = textAsBlob('sc2')",
-                   [["key1", "sc2", 5, "value5"],
-                    ["key1", "sc2", 6, "value6"]])
-
-        assert_all(session, "SELECT column2, value FROM \"Super1\" WHERE key = textAsBlob('key1') AND column1 = textAsBlob('sc2')",
-                   [[5, "value5"],
-                    [6, "value6"]])
-
-    @since('2.1')
-    def test_super_cql_write_compatibility(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Super1')
-
-        node1 = self.cluster.nodelist()[0]
-        session = self.patient_cql_connection(node1)
-
-        session.execute('USE "Keyspace1"')
-
-        query = "INSERT INTO \"Super1\" (key, column1, column2, value) VALUES (textAsBlob(%s), textAsBlob(%s), %s, textAsBlob(%s)) USING TIMESTAMP 1234"
-        session.execute(query, ("key1", "sc1", 4, "value4"))
-        session.execute(query, ("key1", "sc2", 5, "value5"))
-        session.execute(query, ("key1", "sc2", 6, "value6"))
-        session.execute(query, ("key2", "sc1", 4, "value4"))
-        session.execute(query, ("key2", "sc2", 5, "value5"))
-        session.execute(query, ("key2", "sc2", 6, "value6"))
-
-        p = SlicePredicate(slice_range=SliceRange('sc1', 'sc2', False, 2))
-        result = client.get_slice('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE)
-        assert_length_equal(result, 2)
-        self.assertEqual(result[0].super_column.name, 'sc1')
-        self.assertEqual(result[0].super_column.columns[0], Column(_i64(4), 'value4', 1234))
-        self.assertEqual(result[1].super_column.name, 'sc2')
-        self.assertEqual(result[1].super_column.columns, [Column(_i64(5), 'value5', 1234), Column(_i64(6), 'value6', 1234)])
-
-    def test_range_with_remove(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1')
-
-        _insert_simple()
-        assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), 'key1', '', 1000, ConsistencyLevel.ONE)[0].key == 'key1'
-
-        client.remove('key1', ColumnPath('Standard1', column='c1'), 1, ConsistencyLevel.ONE)
-        client.remove('key1', ColumnPath('Standard1', column='c2'), 1, ConsistencyLevel.ONE)
-        actual = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c2']), '', '', 1000, ConsistencyLevel.ONE)
-        assert actual == [KeySlice(columns=[], key='key1')], actual
-
-    def test_range_with_remove_cf(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1')
-
-        _insert_simple()
-        assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), 'key1', '', 1000, ConsistencyLevel.ONE)[0].key == 'key1'
-
-        client.remove('key1', ColumnPath('Standard1'), 1, ConsistencyLevel.ONE)
-        actual = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE)
-        assert actual == [KeySlice(columns=[], key='key1')], actual
-
-    def test_range_collation(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1')
-
-        for key in ['-a', '-b', 'a', 'b'] + [str(i) for i in xrange(100)]:
-            client.insert(key, ColumnParent('Standard1'), Column(key, 'v', 0), ConsistencyLevel.ONE)
-
-        slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '', '', 1000, ConsistencyLevel.ONE)
-        L = ['-a', '-b', '0', '1', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '2', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '3', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '4', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '5', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '6', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '7', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '8', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '9', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', 'a', 'b']
-        assert len(slices) == len(L)
-        for key, ks in zip(L, slices):
-            assert key == ks.key
-
-    def test_range_partial(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1')
-
-        for key in ['-a', '-b', 'a', 'b'] + [str(i) for i in xrange(100)]:
-            client.insert(key, ColumnParent('Standard1'), Column(key, 'v', 0), ConsistencyLevel.ONE)
-
-        def check_slices_against_keys(keyList, sliceList):
-            assert len(keyList) == len(sliceList), "%d vs %d" % (len(keyList), len(sliceList))
-            for key, ks in zip(keyList, sliceList):
-                assert key == ks.key
-
-        slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), 'a', '', 1000, ConsistencyLevel.ONE)
-        check_slices_against_keys(['a', 'b'], slices)
-
-        slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '', '15', 1000, ConsistencyLevel.ONE)
-        check_slices_against_keys(['-a', '-b', '0', '1', '10', '11', '12', '13', '14', '15'], slices)
-
-        slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '50', '51', 1000, ConsistencyLevel.ONE)
-        check_slices_against_keys(['50', '51'], slices)
-
-        slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '1', '', 10, ConsistencyLevel.ONE)
-        check_slices_against_keys(['1', '10', '11', '12', '13', '14', '15', '16', '17', '18'], slices)
-
-    def test_get_slice_range(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1')
-
-        _insert_range()
-        _verify_range()
-
-    def test_get_slice_super_range(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Super1')
-
-        _insert_super_range()
-        _verify_super_range()
-
-    def test_get_range_slices_tokens(self):
-        _set_keyspace('Keyspace2')
-        self.truncate_all('Super3')
-
-        for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
-            for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
-                client.insert(key, ColumnParent('Super3', 'sc1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
-
-        cp = ColumnParent('Super3', 'sc1')
-        predicate = SlicePredicate(column_names=['col1', 'col3'])
-        range = KeyRange(start_token='55', end_token='55', count=100)
-        result = client.get_range_slices(cp, predicate, range, ConsistencyLevel.ONE)
-        assert len(result) == 5
-        assert result[0].columns[0].column.name == 'col1'
-        assert result[0].columns[1].column.name == 'col3'
-
-    def test_get_range_slice_super(self):
-        _set_keyspace('Keyspace2')
-        self.truncate_all('Super3')
-
-        for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
-            for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
-                client.insert(key, ColumnParent('Super3', 'sc1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
-
-        cp = ColumnParent('Super3', 'sc1')
-        result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
-        assert len(result) == 3
-        assert result[0].columns[0].column.name == 'col1'
-        assert result[0].columns[1].column.name == 'col3'
-
-        cp = ColumnParent('Super3')
-        result = get_range_slice(client, cp, SlicePredicate(column_names=['sc1']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
-        assert len(result) == 3
-        assert list(set(row.columns[0].super_column.name for row in result))[0] == 'sc1'
-
-    def test_get_range_slice(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1')
-
-        for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
-            for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
-                client.insert(key, ColumnParent('Standard1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
-        cp = ColumnParent('Standard1')
-
-        # test empty slice
-        result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key6', '', 1, ConsistencyLevel.ONE)
-        assert len(result) == 0
-
-        # test empty columns
-        result = get_range_slice(client, cp, SlicePredicate(column_names=['a']), 'key2', '', 1, ConsistencyLevel.ONE)
-        assert len(result) == 1
-        assert len(result[0].columns) == 0
-
-        # test column_names predicate
-        result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
-        assert len(result) == 3, result
-        assert result[0].columns[0].column.name == 'col1'
-        assert result[0].columns[1].column.name == 'col3'
-
-        # row limiting via count.
-        result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 1, ConsistencyLevel.ONE)
-        assert len(result) == 1
-
-        # test column slice predicate
-        result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=5)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
-        assert len(result) == 2
-        assert result[0].key == 'key1'
-        assert result[1].key == 'key2'
-        assert len(result[0].columns) == 3
-        assert result[0].columns[0].column.name == 'col2'
-        assert result[0].columns[2].column.name == 'col4'
-
-        # col limiting via count
-        result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=2)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
-        assert len(result[0].columns) == 2
-
-        # and reversed
-        result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col4', finish='col2', reversed=True, count=5)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
-        assert result[0].columns[0].column.name == 'col4'
-        assert result[0].columns[2].column.name == 'col2'
-
-        # row limiting via count
-        result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=5)), 'key1', 'key2', 1, ConsistencyLevel.ONE)
-        assert len(result) == 1
-
-        # removed data
-        client.remove('key1', ColumnPath('Standard1', column='col1'), 1, ConsistencyLevel.ONE)
-        result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange('', '')), 'key1', 'key2', 5, ConsistencyLevel.ONE)
-        assert len(result) == 2, result
-        assert result[0].columns[0].column.name == 'col2', result[0].columns[0].column.name
-        assert result[1].columns[0].column.name == 'col1'
-
-    def test_wrapped_range_slices(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1')
-
-        def copp_token(key):
-            # I cheated and generated this from Java
-            return {'a': '00530000000100000001',
-                    'b': '00540000000100000001',
-                    'c': '00550000000100000001',
-                    'd': '00560000000100000001',
-                    'e': '00580000000100000001'}[key]
-
-        for key in ['a', 'b', 'c', 'd', 'e']:
-            for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
-                client.insert(key, ColumnParent('Standard1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
-        cp = ColumnParent('Standard1')
-
-        result = client.get_range_slices(cp, SlicePredicate(column_names=['col1', 'col3']), KeyRange(start_token=copp_token('e'), end_token=copp_token('e')), ConsistencyLevel.ONE)
-        assert [row.key for row in result] == ['a', 'b', 'c', 'd', 'e', ], [row.key for row in result]
-
-        result = client.get_range_slices(cp, SlicePredicate(column_names=['col1', 'col3']), KeyRange(start_token=copp_token('c'), end_token=copp_token('c')), ConsistencyLevel.ONE)
-        assert [row.key for row in result] == ['a', 'b', 'c', 'd', 'e', ], [row.key for row in result]
-
-    def test_get_slice_by_names(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1', 'Super1')
-
-        _insert_range()
-        p = SlicePredicate(column_names=['c1', 'c2'])
-        result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
-        assert len(result) == 2
-        assert result[0].column.name == 'c1'
-        assert result[1].column.name == 'c2'
-
-        _insert_super()
-        p = SlicePredicate(column_names=[_i64(4)])
-        result = client.get_slice('key1', ColumnParent('Super1', 'sc1'), p, ConsistencyLevel.ONE)
-        assert len(result) == 1
-        assert result[0].column.name == _i64(4)
-
-    def test_multiget_slice_with_compact_table(self):
-        """Insert multiple keys in a compact table and retrieve them using the multiget_slice interface"""
-
-        _set_keyspace('Keyspace1')
-
-        # create
-        cd = ColumnDef('v', 'AsciiType', None, None)
-        newcf = CfDef('Keyspace1', 'CompactColumnFamily', default_validation_class='AsciiType', column_metadata=[cd])
-        client.system_add_column_family(newcf)
-
-        CL = ConsistencyLevel.ONE
-        for i in range(0, 5):
-            client.insert('key' + str(i), ColumnParent('CompactColumnFamily'), Column('v', 'value' + str(i), 0), CL)
-        time.sleep(0.1)
-
-        p = SlicePredicate(column_names=['v'])
-        rows = client.multiget_slice(['key' + str(i) for i in range(0, 5)], ColumnParent('CompactColumnFamily'), p, ConsistencyLevel.ONE)
-
-        for i in range(0, 5):
-            key = 'key' + str(i)
-            assert key in rows
-            assert len(rows[key]) == 1
-            assert rows[key][0].column.name == 'v'
-            assert rows[key][0].column.value == 'value' + str(i)
-
-    def test_multiget_slice(self):
-        """Insert multiple keys and retrieve them using the multiget_slice interface"""
-
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1')
-
-        # Generate a list of 10 keys and insert them
-        num_keys = 10
-        keys = ['key' + str(i) for i in range(1, num_keys + 1)]
-        _insert_multi(keys)
-
-        # Retrieve all 10 key slices
-        rows = _big_multislice(keys, ColumnParent('Standard1'))
-
-        columns = [ColumnOrSuperColumn(c) for c in _SIMPLE_COLUMNS]
-        # Validate if the returned rows have the keys requested and if the ColumnOrSuperColumn is what was inserted
-        for key in keys:
-            assert key in rows
-            assert columns == rows[key]
-
-    def test_multi_count(self):
-        """Insert multiple keys and count them using the multiget interface"""
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Standard1')
-
-        # Generate a list of 10 keys containing 1 to 10 columns and insert them
-        num_keys = 10
-        for i in range(1, num_keys + 1):
-            key = 'key' + str(i)
-            for j in range(1, i + 1):
-                client.insert(key, ColumnParent('Standard1'), Column('c' + str(j), 'value' + str(j), 0), ConsistencyLevel.ONE)
-
-        # Count columns in all 10 keys
-        keys = ['key' + str(i) for i in range(1, num_keys + 1)]
-        p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
-        counts = client.multiget_count(keys, ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
-
-        # Check the returned counts
-        for i in range(1, num_keys + 1):
-            key = 'key' + str(i)
-            assert counts[key] == i
-
-    def test_batch_mutate_super_deletion(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Super1')
-
-        _insert_super('test')
-        d = Deletion(1, predicate=SlicePredicate(column_names=['sc1']))
-        cfmap = {'Super1': [Mutation(deletion=d)]}
-        client.batch_mutate({'test': cfmap}, ConsistencyLevel.ONE)
-        _expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc1'), ConsistencyLevel.ONE))
-
-    def test_super_reinsert(self):
-        _set_keyspace('Keyspace1')
-        self.truncate_all('Super1')
-
-        for x in xrange(3):
-            client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(x), 'value', 1), ConsistencyLevel.ONE)
-
-        client.remove('key1', ColumnPath('Super1'), 2, ConsistencyLevel.ONE)
-
-        for x in xrange(3):
-            client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(x + 3), 'value', 3), ConsistencyLevel.ONE)
-
-        for n in xrange(1, 4):
-            p = SlicePredicate(slice_range=SliceRange('', '', False, n))
-            slice = client.get_slice('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE)
-            assert len(slice) == n, "expected %s results; found %s" % (n, slice)
-
-    def test_describe_keyspace(self):
-        try:
-            client.system_drop_keyspace("ValidKsForUpdate")
-        except InvalidRequestException:
-            pass  # The keyspace doesn't exist because this test was run in isolation.
-
-        kspaces = client.describe_keyspaces()
-        if self.cluster.version() >= '3.0':
-            assert len(kspaces) == 7, [x.name for x in kspaces]  # ['Keyspace2', 'Keyspace1', 'system', 'system_traces', 'system_auth', 'system_distributed', 'system_schema']
-        elif self.cluster.version() >= '2.2':
-            assert len(kspaces) == 6, [x.name for x in kspaces]  # ['Keyspace2', 'Keyspace1', 'system', 'system_traces', 'system_auth', 'system_distributed']
-        else:
-            assert len(kspaces) == 4, [x.name for x in kspaces]  # ['Keyspace2', 'Keyspace1', 'system', 'system_traces']
-
-        sysks = client.describe_keyspace("system")
-        assert sysks in kspaces
-
-        ks1 = client.describe_keyspace("Keyspace1")
-        assert ks1.strategy_options['replication_factor'] == '1', ks1.strategy_options
-        for cf in ks1.cf_defs:
-            if cf.name == "Standard1":
-                cf0 = cf
-                break
-        assert cf0.comparator_type == "org.apache.cassandra.db.marshal.BytesType"
-
-    def test_describe(self):
-        assert client.describe_cluster_name() == 'test'
-
-    def test_describe_ring(self):
-        assert list(client.describe_ring('Keyspace1'))[0].endpoints == ['127.0.0.1']
-
-    def test_describe_token_map(self):
-        # test/conf/cassandra.yaml specifies org.apache.cassandra.dht.ByteOrderedPartitioner
-        # which uses BytesToken, so this just tests that the string representation of the token
-        # matches a regex pattern for BytesToken.toString().
-        ring = client.describe_token_map().items()
-        if DISABLE_VNODES:
-            self.assertEqual(len(ring), 1)
-        else:
-            self.assertEqual(len(ring), int(NUM_TOKENS))
-        token, node = ring[0]
-        if not DISABLE_VNODES:
-            assert re.match("[0-9A-Fa-f]{32}", token)
-        assert node == '127.0.0.1'
-
-    def test_describe_partitioner(self):
-        # Make sure this just reads back the values from the config.
-        assert client.describe_partitioner() == "org.apache.cassandra.dht.ByteOrderedPartitioner"
-
-    def test_describe_snitch(self):
-        assert client.describe_snitch() == "org.apache.cassandra.locator.SimpleSnitch"
-
-    def test_invalid_ks_names(self):
-        def invalid_keyspace():
-            client.system_add_keyspace(KsDef('in-valid', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'}, cf_defs=[]))
-        _expect_exception(invalid_keyspace, InvalidRequestException)
-
-    def test_invalid_strategy_class(self):
-        def add_invalid_keyspace():
-            client.system_add_keyspace(KsDef('ValidKs', 'InvalidStrategyClass', {}, cf_defs=[]))
-        exc = _expect_exception(add_invalid_keyspace, InvalidRequestException)
-        s = str(exc)
-        assert s.find("InvalidStrategyClass") > -1, s
-        assert s.find("Unable to find replication strategy") > -1, s
-
-        def update_invalid_keyspace():
-            client.system_add_keyspace(KsDef('ValidKsForUpdate', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'}, cf_defs=[]))
-            client.system_update_keyspace(KsDef('ValidKsForUpdate', 'InvalidStrategyClass', {}, cf_defs=[]))
-
-        exc = _expect_exception(update_invalid_keyspace, InvalidRequestException)
-        s = str(exc)
-        assert s.find("InvalidStrategyClass") > -1, s
-        assert s.find("Unable to find replication strategy") > -1, s
-
-    def test_invalid_cf_names(self):
-        def invalid_cf():
-            _set_keyspace('Keyspace1')
-            newcf = CfDef('Keyspace1', 'in-valid')
-            client.system_add_column_family(newcf)
-        _expect_exception(invalid_cf, InvalidRequestException)
-
-        def invalid_cf_inside_new_ks():
-            cf = CfDef('ValidKsName_invalid_cf', 'in-valid')
-            _set_keyspace('system')
-            client.system_add_keyspace(KsDef('ValidKsName_invalid_cf', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'}, cf_defs=[cf]))
-        _expect_exception(invalid_cf_inside_new_ks, InvalidRequestException)
-
-    def test_system_cf_recreate(self):
-        "ensures that keyspaces and column familes can be dropped and recreated in short order"
-        for x in range(2):
-
-            keyspace = 'test_cf_recreate'
-            cf_name = 'recreate_cf'
-
-            # create
-            newcf = CfDef(keyspace, cf_name)
-            newks = KsDef(keyspace, 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'}, cf_defs=[newcf])
-            client.system_add_keyspace(newks)
-            _set_keyspace(keyspace)
-
-            # insert
-            client.insert('key0', ColumnParent(cf_name), Column('colA', 'colA-value', 0), ConsistencyLevel.ONE)
-            col1 = client.get_slice('key0', ColumnParent(cf_name), SlicePredicate(slice_range=SliceRange('', '', False, 100)), ConsistencyLevel.ONE)[0].column
-            assert col1.name == 'colA' and col1.value == 'colA-value'
-
-            # drop
-            client.system_drop_column_family(cf_name)
-
-            # recreate
-            client.system_add_column_family(newcf)
-
-            # query
-            cosc_list = client.get_slice('key0', ColumnParent(cf_name), SlicePredicate(slice_range=SliceRange('', '', False, 100)), ConsistencyLevel.ONE)
-            # this was failing prior to CASSANDRA-1477.
-            assert len(cosc_list) == 0, 'cosc length test failed'
-
-            client.system_drop_keyspace(keyspace)
-
-    def test_system_keyspace_operations(self):
-        # create.  note large RF, this is OK
-        keyspace = KsDef('CreateKeyspace',
-                         'org.apache.cassandra.locator.SimpleStrategy',
-                         {'replication_factor': '10'},
-                         cf_defs=[CfDef('CreateKeyspace', 'CreateKsCf')])
-        client.system_add_keyspace(keyspace)
-        newks = client.describe_keyspace('CreateKeyspace')
-        assert 'CreateKsCf' in [x.name for x in newks.cf_defs]
-
-        _set_keyspace('CreateKeyspace')
-
-        # modify valid
-        modified_keyspace = KsDef('CreateKeyspace',
-                                  'org.apache.cassandra.locator.OldNetworkTopologyStrategy',
-                                  {'replication_factor': '1'},
-                                  cf_defs=[])
-        client.system_update_keyspace(modified_keyspace)
-        modks = client.describe_keyspace('CreateKeyspace')
-        assert modks.strategy_class == modified_keyspace.strategy_class
-        assert modks.strategy_options == modified_keyspace.strategy_options
-
-        # check strategy options are validated on modify
-        def modify_invalid_ks():
-            client.system_update_keyspace(KsDef('CreateKeyspace',
-                                                'org.apache.cassandra.locator.SimpleStrategy',
-                                                {},
-                                                cf_defs=[]))
-        _expect_exception(modify_invalid_ks, InvalidRequestException)
-
-        # drop
-        client.system_drop_keyspace('CreateKeyspace')
-
-        def get_second_ks():
-            client.describe_keyspace('CreateKeyspace')
-        _expect_exception(get_second_ks, NotFoundException)
-
-        # check strategy options are validated on creation
-        def create_invalid_ks():
-            client.system_add_keyspace(KsDef('InvalidKeyspace',
-                                             'org.apache.cassandra.locator.SimpleStrategy',
-                                             {},
-                                             cf_defs=[]))
-        _expect_exception(create_invalid_ks, InvalidRequestException)
-
-    def test_create_then_drop_ks(self):
-        keyspace = KsDef('AddThenDrop',
-                         strategy_class='org.apache.cassandra.locator.SimpleStrategy',
-                         strategy_options={'replication_factor': '1'},
-                         cf_defs=[])
-
-        def test_existence():
-            client.describe_keyspace(keyspace.name)
-        _expect_exception(test_existence, NotFoundException)
-        client.set_keyspace('system')
-        client.system_add_keyspace(keyspace)
-        test_existence()
-        client.system_drop_keyspace(keyspace.name)
-
-    def test_column_validators(self):
-        # columndef validation for regular CF
-        ks = 'Keyspace1'
-        _set_keyspace(ks)
-        cd = ColumnDef('col', 'LongType', None, None)
-        cf = CfDef('Keyspace1', 'ValidatorColumnFamily', column_metadata=[cd])
-        client.system_add_column_family(cf)
-        ks_def = client.describe_keyspace(ks)
-        assert 'ValidatorColumnFamily' in [x.name for x in ks_def.cf_defs]
-
-        cp = ColumnParent('ValidatorColumnFamily')
-        col0 = Column('col', _i64(42), 0)
-        col1 = Column('col', "ceci n'est pas 64bit", 0)
-        client.insert('key0', cp, col0, ConsistencyLevel.ONE)
-        e = _expect_exception(lambda: client.insert('key1', cp, col1, ConsistencyLevel.ONE), InvalidRequestException)
-        assert e.why.find("failed validation") >= 0
-
-        # columndef validation for super CF
-        scf = CfDef('Keyspace1', 'ValidatorSuperColumnFamily', column_type='Super', column_metadata=[cd])
-        client.system_add_column_family(scf)
-        ks_def = client.describe_keyspace(ks)
-        assert 'ValidatorSuperColumnFamily' in [x.name for x in ks_def.cf_defs]
-
-        scp = ColumnParent('ValidatorSuperColumnFamily', 'sc1')
-        client.insert('key0', scp, col0, ConsistencyLevel.ONE)
-        e = _expect_exception(lambda: client

<TRUNCATED>
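The migrated replacement for the helpers above is cut off here, but the
pattern is worth noting: the old _expect_exception(fn, type_) helper, whose
return value the removed tests assert on (e.g. e.why), maps naturally onto
pytest.raises. A minimal sketch under that assumption, not necessarily the
helper as committed:

    import pytest

    def _expect_exception(fn, type_):
        # Run fn, require that it raises type_, and hand the exception
        # back so callers can keep asserting on fields such as e.why.
        with pytest.raises(type_) as cm:
            fn()
        return cm.value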



[05/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/upgrade_tests/compatibility_flag_test.py
----------------------------------------------------------------------
diff --git a/upgrade_tests/compatibility_flag_test.py b/upgrade_tests/compatibility_flag_test.py
index 1abeaef..f308174 100644
--- a/upgrade_tests/compatibility_flag_test.py
+++ b/upgrade_tests/compatibility_flag_test.py
@@ -1,9 +1,15 @@
-from dtest import Tester, debug
+import pytest
+import logging
+
+from dtest import Tester
 from tools.assertions import assert_all
-from tools.decorators import since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
-class CompatibilityFlagTest(Tester):
+@pytest.mark.upgrade_test
+class TestCompatibilityFlag(Tester):
     """
     Test 3.0 protocol compatibility flag
 
@@ -24,7 +30,7 @@ class CompatibilityFlagTest(Tester):
         node1.drain()
         node1.watch_log_for("DRAINED")
         node1.stop(wait_other_notice=False)
-        debug("Upgrading to current version")
+        logger.debug("Upgrading to current version")
         self.set_node_to_current_version(node1)
         node1.start(wait_for_binary_proto=True)
 
@@ -45,18 +51,17 @@ class CompatibilityFlagTest(Tester):
         node1.drain()
         node1.watch_log_for("DRAINED")
         node1.stop(wait_other_notice=False)
-        debug("Upgrading to current version")
+        logger.debug("Upgrading to current version")
         self.set_node_to_current_version(node1)
         node1.start(jvm_args=["-Dcassandra.force_3_0_protocol_version=true"], wait_for_binary_proto=True)
 
         session = self.patient_cql_connection(node1)
         self._run_test(session)
 
-    def _compatibility_flag_on_3014_test(self):
+    def test__compatibility_flag_on_3014(self):
         """
         Test compatibility between post-13004 nodes, one of which is in compatibility mode
         """
-
         cluster = self.cluster
         cluster.populate(2)
         node1, node2 = cluster.nodelist()
@@ -67,11 +72,10 @@ class CompatibilityFlagTest(Tester):
         session = self.patient_cql_connection(node1)
         self._run_test(session)
 
-    def _compatibility_flag_off_3014_test(self):
+    def test__compatibility_flag_off_3014(self):
         """
         Test compatibility between post-13004 nodes
         """
-
         cluster = self.cluster
         cluster.populate(2)
         node1, node2 = cluster.nodelist()
@@ -101,32 +105,32 @@ class CompatibilityFlagTest(Tester):
 
 
 @since('3.0.14', max_version='3.0.x')
-class CompatibilityFlag30XTest(CompatibilityFlagTest):
+class TestCompatibilityFlag30X(TestCompatibilityFlag):
 
-    def compatibility_flag_off_with_30_node_test(self):
+    def test_compatibility_flag_off_with_30_node(self):
         self._compatibility_flag_off_with_30_node_test('3.0.12')
 
-    def compatibility_flag_on_with_3_0_test(self):
+    def test_compatibility_flag_on_with_3_0(self):
         self._compatibility_flag_on_with_30_test('3.0.12')
 
-    def compatibility_flag_on_3014_test(self):
+    def test_compatibility_flag_on_3014(self):
         self.test__compatibility_flag_on_3014()
 
-    def compatibility_flag_off_3014_test(self):
+    def test_compatibility_flag_off_3014(self):
         self.test__compatibility_flag_off_3014()
 
 
 @since('3.11', max_version='4')
-class CompatibilityFlag3XTest(CompatibilityFlagTest):
+class TestCompatibilityFlag3X(TestCompatibilityFlag):
 
-    def compatibility_flag_off_with_30_node_test(self):
+    def test_compatibility_flag_off_with_30_node(self):
         self._compatibility_flag_off_with_30_node_test('3.10')
 
-    def compatibility_flag_on_with_3_0_test(self):
+    def test_compatibility_flag_on_with_3_0(self):
         self._compatibility_flag_on_with_30_test('3.10')
 
-    def compatibility_flag_on_3014_test(self):
+    def test_compatibility_flag_on_3014(self):
         self.test__compatibility_flag_on_3014()
 
-    def compatibility_flag_off_3014_test(self):
+    def test_compatibility_flag_off_3014(self):
         self.test__compatibility_flag_off_3014()
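A note on the since = pytest.mark.since alias introduced above: a marker by
itself does nothing until a hook consumes it. A minimal conftest.py sketch of
how such version gating can work, with CLUSTER_VERSION standing in for
whatever the framework actually derives from the test configuration (this is
an illustration, not the dtest implementation):

    import pytest
    from distutils.version import LooseVersion

    CLUSTER_VERSION = LooseVersion('3.11')  # assumed; supplied by the test config

    def pytest_collection_modifyitems(config, items):
        for item in items:
            since = item.get_closest_marker('since')
            if since is None:
                continue
            if CLUSTER_VERSION < LooseVersion(since.args[0]):
                item.add_marker(pytest.mark.skip(reason='below @since bound'))
            max_version = since.kwargs.get('max_version')
            if max_version and CLUSTER_VERSION > LooseVersion(max_version):
                item.add_marker(pytest.mark.skip(reason='above max_version bound'))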




[28/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/cqlsh_tests/cqlsh_tests.py
----------------------------------------------------------------------
diff --git a/cqlsh_tests/cqlsh_tests.py b/cqlsh_tests/cqlsh_tests.py
index bf2b90c..1d6e96e 100644
--- a/cqlsh_tests/cqlsh_tests.py
+++ b/cqlsh_tests/cqlsh_tests.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 import binascii
 import csv
 import datetime
@@ -6,6 +5,9 @@ import os
 import re
 import subprocess
 import sys
+import logging
+
+import pytest
 from decimal import Decimal
 from distutils.version import LooseVersion
 from tempfile import NamedTemporaryFile
@@ -16,15 +18,16 @@ from cassandra.concurrent import execute_concurrent_with_args
 from cassandra.query import BatchStatement, BatchType
 from ccmlib import common
 
-from cqlsh_tools import monkeypatch_driver, unmonkeypatch_driver
-from dtest import Tester, debug, create_ks, create_cf
+from .cqlsh_tools import monkeypatch_driver, unmonkeypatch_driver
+from dtest import Tester, create_ks, create_cf
 from tools.assertions import assert_all, assert_none
 from tools.data import create_c1c2_table, insert_c1c2, rows_to_list
-from tools.decorators import since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 class TestCqlsh(Tester):
-    maxDiff = None
 
     @classmethod
     def setUpClass(cls):
@@ -59,13 +62,13 @@ class TestCqlsh(Tester):
 
         cmds = ['pycodestyle', '--ignore', 'E501,E402,E731', cqlsh_path] + cqlshlib_paths
 
-        debug(cmds)
+        logger.debug(cmds)
 
         p = subprocess.Popen(cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         stdout, stderr = p.communicate()
 
-        self.assertEqual(len(stdout), 0, stdout)
-        self.assertEqual(len(stderr), 0, stderr)
+        assert len(stdout) == 0, stdout
+        assert len(stderr) == 0, stderr
 
     def test_simple_insert(self):
 
@@ -114,7 +117,7 @@ class TestCqlsh(Tester):
                 stmt=repr(stmt),
                 routput=repr(output)
             )
-            self.assertIn(expected_substring, output, msg=msg)
+            assert expected_substring in output, msg
 
         assert_applied("INSERT INTO lwt.lwt (id, value) VALUES (1, 'one') IF NOT EXISTS")
         assert_applied("INSERT INTO lwt.lwt (id, value) VALUES (1, 'one') IF NOT EXISTS")
@@ -141,11 +144,11 @@ class TestCqlsh(Tester):
         output, err = self.run_cqlsh(node1, 'use simple; SELECT * FROM simpledate')
 
         if self.cluster.version() >= LooseVersion('3.4'):
-            self.assertIn("2143-04-19 11:21:01.000000+0000", output)
-            self.assertIn("1943-04-19 11:21:01.000000+0000", output)
+            assert "2143-04-19 11:21:01.000000+0000" in output
+            assert "1943-04-19 11:21:01.000000+0000" in output
         else:
-            self.assertIn("2143-04-19 11:21:01+0000", output)
-            self.assertIn("1943-04-19 11:21:01+0000", output)
+            assert "2143-04-19 11:21:01+0000" in output
+            assert "1943-04-19 11:21:01+0000" in output
 
     @since('3.4')
     def test_sub_second_precision(self):
@@ -168,27 +171,27 @@ class TestCqlsh(Tester):
         output, err, _ = node1.run_cqlsh(cmds="use simple; SELECT * FROM testsubsecond "
                                          "WHERE id = 1 AND subid = '1943-06-19 11:21:01.123+0000'")
 
-        debug(output)
-        self.assertIn("1943-06-19 11:21:01.123000+0000", output)
-        self.assertNotIn("1943-06-19 11:21:01.000000+0000", output)
+        logger.debug(output)
+        assert "1943-06-19 11:21:01.123000+0000" in output
+        assert "1943-06-19 11:21:01.000000+0000" not in output
 
         output, err, _ = node1.run_cqlsh(cmds="use simple; SELECT * FROM testsubsecond "
                                          "WHERE id = 2 AND subid = '1943-06-19 11:21:01+0000'")
 
-        debug(output)
-        self.assertIn("1943-06-19 11:21:01.000000+0000", output)
-        self.assertNotIn("1943-06-19 11:21:01.123000+0000", output)
+        logger.debug(output)
+        assert "1943-06-19 11:21:01.000000+0000" in output
+        assert "1943-06-19 11:21:01.123000+0000" not in output
 
     def verify_glass(self, node):
         session = self.patient_cql_connection(node)
 
         def verify_varcharmap(map_name, expected, encode_value=False):
-            rows = list(session.execute((u"SELECT %s FROM testks.varcharmaptable WHERE varcharkey= '᚛᚛ᚉᚑᚅᚔᚉᚉᚔᚋ ᚔᚈᚔ ᚍᚂᚐᚅᚑ ᚅᚔᚋᚌᚓᚅᚐ᚜';" % map_name).encode("utf-8")))
+            rows = list(session.execute("SELECT %s FROM testks.varcharmaptable WHERE varcharkey= '᚛᚛ᚉᚑᚅᚔᚉᚉᚔᚋ ᚔᚈᚔ ᚍᚂᚐᚅᚑ ᚅᚔᚋᚌᚓᚅᚐ᚜';" % map_name))
             if encode_value:
-                got = {k.encode("utf-8"): v.encode("utf-8") for k, v in rows[0][0].iteritems()}
+                got = {k.encode("utf-8"): v.encode("utf-8") for k, v in rows[0][0].items()}
             else:
-                got = {k.encode("utf-8"): v for k, v in rows[0][0].iteritems()}
-            self.assertEqual(got, expected)
+                got = {k.encode("utf-8"): v for k, v in rows[0][0].items()}
+            assert got == expected
 
         verify_varcharmap('varcharasciimap', {
             'Vitrum edere possum, mihi non nocet.': 'Hello',
@@ -297,9 +300,9 @@ class TestCqlsh(Tester):
 
         output, err = self.run_cqlsh(node, 'use testks; SELECT * FROM varcharmaptable', ['--encoding=utf-8'])
 
-        self.assertEquals(output.count('Можам да јадам стакло, а не ме штета.'), 16)
-        self.assertEquals(output.count(' ⠊⠀⠉⠁⠝⠀⠑⠁⠞⠀⠛⠇⠁⠎⠎⠀⠁⠝⠙⠀⠊⠞⠀⠙⠕⠑⠎⠝⠞⠀⠓⠥⠗⠞⠀⠍⠑'), 16)
-        self.assertEquals(output.count('᚛᚛ᚉᚑᚅᚔᚉᚉᚔᚋ ᚔᚈᚔ ᚍᚂᚐᚅᚑ ᚅᚔᚋᚌᚓᚅᚐ᚜'), 2)
+        assert output.decode("utf-8").count('Можам да јадам стакло, а не ме штета.') == 16
+        assert output.decode("utf-8").count(' ⠊⠀⠉⠁⠝⠀⠑⠁⠞⠀⠛⠇⠁⠎⠎⠀⠁⠝⠙⠀⠊⠞⠀⠙⠕⠑⠎⠝⠞⠀⠓⠥⠗⠞⠀⠍⠑') == 16
+        assert output.decode("utf-8").count('᚛᚛ᚉᚑᚅᚔᚉᚉᚔᚋ ᚔᚈᚔ ᚍᚂᚐᚅᚑ ᚅᚔᚋᚌᚓᚅᚐ᚜') == 2
 
     def test_eat_glass(self):
 
@@ -308,7 +311,7 @@ class TestCqlsh(Tester):
 
         node1, = self.cluster.nodelist()
 
-        node1.run_cqlsh(cmds=u"""create KEYSPACE testks WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};
+        node1.run_cqlsh(cmds="""create KEYSPACE testks WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};
 use testks;
 
 CREATE TABLE varcharmaptable (
@@ -439,16 +442,15 @@ UPDATE varcharmaptable SET varcharvarintmap['Vitrum edere possum, mihi non nocet
         Ensure that syntax errors involving unicode are handled correctly.
         @jira_ticket CASSANDRA-11626
         """
-
         self.cluster.populate(1)
         self.cluster.start(wait_for_binary_proto=True)
 
         node1, = self.cluster.nodelist()
 
-        output, err, _ = node1.run_cqlsh(cmds=u"ä;".encode('utf8'))
+        output, err, _ = node1.run_cqlsh(cmds="ä;".encode('utf8'))
         err = err.decode('utf8')
-        self.assertIn(u'Invalid syntax', err)
-        self.assertIn(u'ä', err)
+        assert 'Invalid syntax' in err
+        assert 'ä' in err
 
     @since('2.2')
     def test_unicode_invalid_request_error(self):
@@ -461,12 +463,12 @@ UPDATE varcharmaptable SET varcharvarintmap['Vitrum edere possum, mihi non nocet
 
         node1, = self.cluster.nodelist()
 
-        cmd = u'''create keyspace "ä" WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'};'''
+        cmd = '''create keyspace "ä" WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'};'''
         cmd = cmd.encode('utf8')
         output, err, _ = node1.run_cqlsh(cmds=cmd, cqlsh_options=["--debug"])
 
         err = err.decode('utf8')
-        self.assertIn(u'"ä" is not a valid keyspace name', err)
+        assert '"ä" is not a valid keyspace name' in err
 
     def test_with_empty_values(self):
         """
@@ -477,7 +479,7 @@ UPDATE varcharmaptable SET varcharvarintmap['Vitrum edere possum, mihi non nocet
 
         node1, = self.cluster.nodelist()
 
-        node1.run_cqlsh(cmds=u"""create keyspace  CASSANDRA_7196 WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} ;
+        node1.run_cqlsh(cmds="""create keyspace  CASSANDRA_7196 WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} ;
 
 use CASSANDRA_7196;
 
@@ -536,7 +538,7 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
 
         output, err = self.run_cqlsh(node1, "select intcol, bigintcol, varintcol from CASSANDRA_7196.has_all_types where num in (0, 1, 2, 3, 4)")
         if common.is_win():
-            output = output.replace('\r', '')
+            output = output.decode("utf-8").replace('\r', '')
 
         expected = """
  intcol      | bigintcol            | varintcol
@@ -547,9 +549,9 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
  -2147483648 | -9223372036854775808 | -10000000000000000000000000
              |                      |                            \n\n(5 rows)"""
 
-        self.assertTrue(expected in output, "Output \n {%s} \n doesn't contain expected\n {%s}" % (output, expected))
+        assert expected in output, "Output \n {%s} \n doesn't contain expected\n {%s}" % (output, expected)
 
-    def tracing_from_system_traces_test(self):
+    def test_tracing_from_system_traces(self):
         self.cluster.populate(1).start(wait_for_binary_proto=True)
 
         node1, = self.cluster.nodelist()
@@ -562,15 +564,15 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
         insert_c1c2(session, n=100)
 
         out, err = self.run_cqlsh(node1, 'TRACING ON; SELECT * FROM ks.cf')
-        self.assertIn('Tracing session: ', out)
+        assert 'Tracing session: ' in out
 
         out, err = self.run_cqlsh(node1, 'TRACING ON; SELECT * FROM system_traces.events')
-        self.assertNotIn('Tracing session: ', out)
+        assert 'Tracing session: ' not in out
 
         out, err = self.run_cqlsh(node1, 'TRACING ON; SELECT * FROM system_traces.sessions')
-        self.assertNotIn('Tracing session: ', out)
+        assert 'Tracing session: ' not in out
 
-    def select_element_inside_udt_test(self):
+    def test_select_element_inside_udt(self):
         self.cluster.populate(1).start()
 
         node1, = self.cluster.nodelist()
@@ -601,7 +603,7 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
             """)
 
         out, err = self.run_cqlsh(node1, "SELECT name.lastname FROM ks.users WHERE id=62c36092-82a1-3a00-93d1-46196ee77204")
-        self.assertNotIn('list index out of range', err)
+        assert 'list index out of range' not in err
         # If this assertion fails check CASSANDRA-7891
 
     def verify_output(self, query, node, expected):
@@ -609,10 +611,10 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
         if common.is_win():
             output = output.replace('\r', '')
 
-        self.assertEqual(len(err), 0, "Failed to execute cqlsh: {}".format(err))
+        assert len(err) == 0, "Failed to execute cqlsh: {}".format(err)
 
-        debug(output)
-        self.assertTrue(expected in output, "Output \n {%s} \n doesn't contain expected\n {%s}" % (output, expected))
+        logger.debug(output)
+        assert expected in output, "Output \n {%s} \n doesn't contain expected\n {%s}" % (output, expected)
 
     def test_list_queries(self):
         config = {'authenticator': 'org.apache.cassandra.auth.PasswordAuthenticator',
@@ -695,8 +697,8 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
 
         # Describe keyspaces
         output = self.execute(cql="DESCRIBE KEYSPACES")
-        self.assertIn("test", output)
-        self.assertIn("system", output)
+        assert "test" in output
+        assert "system" in output
 
         # Describe keyspace
         self.execute(cql="DESCRIBE KEYSPACE test", expected_output=self.get_keyspace_output())
@@ -775,8 +777,8 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
                         "'min_threshold': 10, 'max_threshold': 100 }")
         describe_cmd = 'DESCRIBE ks.tab'
         stdout, _ = self.run_cqlsh(node, describe_cmd)
-        self.assertIn("'min_threshold': '10'", stdout)
-        self.assertIn("'max_threshold': '100'", stdout)
+        assert "'min_threshold': '10'" in stdout
+        assert "'max_threshold': '100'" in stdout
 
     def test_describe_on_non_reserved_keywords(self):
         """
@@ -791,8 +793,8 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
         session.execute("CREATE TABLE map (key int PRIMARY KEY, val text)")
         describe_cmd = 'USE ks; DESCRIBE map'
         out, err = self.run_cqlsh(node, describe_cmd)
-        self.assertEqual("", err)
-        self.assertIn("CREATE TABLE ks.map (", out)
+        assert "" == err
+        assert "CREATE TABLE ks.map (" in out
 
     @since('3.0')
     def test_describe_mv(self):
@@ -813,7 +815,7 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
                 """)
 
         output = self.execute(cql="DESCRIBE KEYSPACE test")
-        self.assertIn("users_by_state", output)
+        assert "users_by_state" in output
 
         self.execute(cql='DESCRIBE MATERIALIZED VIEW test.users_by_state', expected_output=self.get_users_by_state_mv_output())
         self.execute(cql='DESCRIBE test.users_by_state', expected_output=self.get_users_by_state_mv_output())
@@ -1045,7 +1047,7 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
                """
 
     def execute(self, cql, expected_output=None, expected_err=None, env_vars=None):
-        debug(cql)
+        logger.debug(cql)
         node1, = self.cluster.nodelist()
         output, err = self.run_cqlsh(node1, cql, env_vars=env_vars)
 
@@ -1055,7 +1057,7 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
                 self.check_response(err, expected_err)
                 return
             else:
-                self.assertTrue(False, err)
+                assert False, err
 
         if expected_output:
             self.check_response(output, expected_output)
@@ -1065,7 +1067,7 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
     def check_response(self, response, expected_response):
         lines = [s.strip() for s in response.split("\n") if s.strip()]
         expected_lines = [s.strip() for s in expected_response.split("\n") if s.strip()]
-        self.assertEqual(expected_lines, lines)
+        assert expected_lines == lines
 
     def test_copy_to(self):
         self.cluster.populate(1).start()
@@ -1089,20 +1091,20 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
         results = list(session.execute("SELECT * FROM testcopyto"))
 
         self.tempfile = NamedTemporaryFile(delete=False)
-        debug('Exporting to csv file: %s' % (self.tempfile.name,))
+        logger.debug('Exporting to csv file: %s' % (self.tempfile.name,))
         node1.run_cqlsh(cmds="COPY ks.testcopyto TO '%s'" % (self.tempfile.name,))
 
         # session
         with open(self.tempfile.name, 'r') as csvfile:
             csvreader = csv.reader(csvfile)
-            result_list = [map(str, cql_row) for cql_row in results]
-            self.assertItemsEqual(result_list, csvreader)
+            result_list = [list(map(str, cql_row)) for cql_row in results]
+            assert sorted(result_list) == sorted(csvreader)
 
         # import the CSV file with COPY FROM
         session.execute("TRUNCATE ks.testcopyto")
         node1.run_cqlsh(cmds="COPY ks.testcopyto FROM '%s'" % (self.tempfile.name,))
         new_results = list(session.execute("SELECT * FROM testcopyto"))
-        self.assertItemsEqual(results, new_results)
+        assert results == new_results
 
     def test_float_formatting(self):
         """ Tests for CASSANDRA-9224, check format of float and double values"""
@@ -1298,7 +1300,7 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
             INSERT INTO values (part, val1, val2, val3, val4) VALUES ('min', %d, %d, -32768, -128);
             INSERT INTO values (part, val1, val2, val3, val4) VALUES ('max', %d, %d, 32767, 127)""" % (-1 << 31, -1 << 63, (1 << 31) - 1, (1 << 63) - 1))
 
-        self.assertEqual(len(stderr), 0, "Failed to execute cqlsh: {}".format(stderr))
+        assert len(stderr) == 0, "Failed to execute cqlsh: {}".format(stderr)
 
         self.verify_output("select * from int_checks.values", node1, """
  part | val1        | val2                 | val3   | val4
@@ -1340,7 +1342,7 @@ CREATE TABLE int_checks.values (
                                         % (datetime.MINYEAR - 1, datetime.MINYEAR, datetime.MAXYEAR, datetime.MAXYEAR + 1,))
         # outside the MIN and MAX range it should print the number of days from the epoch
 
-        self.assertEqual(len(stderr), 0, "Failed to execute cqlsh: {}".format(stderr))
+        assert len(stderr) == 0, "Failed to execute cqlsh: {}".format(stderr)
 
         self.verify_output("select * from datetime_checks.values", node1, """
  d          | t
@@ -1381,7 +1383,7 @@ CREATE TABLE datetime_checks.values (
             INSERT INTO test (id, val) VALUES (2, 'lkjlk');
             INSERT INTO test (id, val) VALUES (3, 'iuiou')""")
 
-        self.assertEqual(len(stderr), 0, "Failed to execute cqlsh: {}".format(stderr))
+        assert len(stderr) == 0, "Failed to execute cqlsh: {}".format(stderr)
 
         self.verify_output("use tracing_checks; tracing on; select * from test", node1, """Now Tracing is enabled
 
@@ -1425,7 +1427,7 @@ Tracing session:""")
             USE client_warnings;
             CREATE TABLE test (id int, val text, PRIMARY KEY (id))""")
 
-        self.assertEqual(len(stderr), 0, "Failed to execute cqlsh: {}".format(stderr))
+        assert len(stderr) == 0, "Failed to execute cqlsh: {}".format(stderr)
 
         session = self.patient_cql_connection(node1)
         prepared = session.prepare("INSERT INTO client_warnings.test (id, val) VALUES (?, 'abc')")
@@ -1433,24 +1435,24 @@ Tracing session:""")
         batch_without_warning = BatchStatement(batch_type=BatchType.UNLOGGED)
         batch_with_warning = BatchStatement(batch_type=BatchType.UNLOGGED)
 
-        for i in xrange(max_partitions_per_batch + 1):
+        for i in range(max_partitions_per_batch + 1):
             batch_with_warning.add(prepared, (i,))
             if i < max_partitions_per_batch:
                 batch_without_warning.add(prepared, (i,))
 
         fut = session.execute_async(batch_without_warning)
         fut.result()  # wait for batch to complete before checking warnings
-        self.assertIsNone(fut.warnings)
+        assert fut.warnings is None
 
         fut = session.execute_async(batch_with_warning)
         fut.result()  # wait for batch to complete before checking warnings
-        debug(fut.warnings)
-        self.assertIsNotNone(fut.warnings)
-        self.assertEquals(1, len(fut.warnings))
-        self.assertEquals("Unlogged batch covering {} partitions detected against table [client_warnings.test]. "
-                          .format(max_partitions_per_batch + 1) +
-                          "You should use a logged batch for atomicity, or asynchronous writes for performance.",
-                          fut.warnings[0])
+        logger.debug(fut.warnings)
+        assert fut.warnings is not None
+        assert 1 == len(fut.warnings)
+        assert "Unlogged batch covering {} partitions detected against table [client_warnings.test]. "\
+                   .format(max_partitions_per_batch + 1) + "You should use a logged batch for atomicity, " \
+                                                           "or asynchronous writes for performance." \
+               == fut.warnings[0]
 
     def test_connect_timeout(self):
         """
@@ -1462,7 +1464,7 @@ Tracing session:""")
         node1, = self.cluster.nodelist()
 
         stdout, stderr = self.run_cqlsh(node1, cmds='USE system', cqlsh_options=['--debug', '--connect-timeout=10'])
-        self.assertTrue("Using connect timeout: 10 seconds" in stderr)
+        assert "Using connect timeout: 10 seconds" in stderr
 
     def test_update_schema_with_down_node(self):
         """
@@ -1481,12 +1483,12 @@ Tracing session:""")
         stdout, stderr = self.run_cqlsh(node1, cmds="""
               CREATE KEYSPACE training WITH replication={'class':'SimpleStrategy','replication_factor':1};
               DESCRIBE KEYSPACES""", cqlsh_options=cqlsh_opts)
-        self.assertIn("training", stdout)
+        assert "training" in stdout
 
         stdout, stderr = self.run_cqlsh(node1, """USE training;
                                                   CREATE TABLE mytable (id int, val text, PRIMARY KEY (id));
                                                   describe tables""", cqlsh_options=cqlsh_opts)
-        self.assertIn("mytable", stdout)
+        assert "mytable" in stdout
 
     def test_describe_round_trip(self):
         """
@@ -1514,7 +1516,7 @@ Tracing session:""")
 
         session.execute('DROP TABLE test_ks.lcs_describe')
 
-        create_statement = 'USE test_ks; ' + ' '.join(describe_out.splitlines())
+        create_statement = 'USE test_ks; ' + ' '.join(describe_out.decode("utf-8").splitlines())
         create_out, create_err = self.run_cqlsh(node1, create_statement)
 
         # these statements shouldn't fall down
@@ -1522,7 +1524,7 @@ Tracing session:""")
         session.execute('INSERT INTO lcs_describe (key) VALUES (1)')
 
         # the table created before and after should be the same
-        self.assertEqual(reloaded_describe_out, describe_out)
+        assert reloaded_describe_out.decode("utf-8") == describe_out.decode("utf-8")
 
     @since('3.0')
     def test_materialized_view(self):
@@ -1550,31 +1552,39 @@ Tracing session:""")
         session.execute(insert_stmt + "('user4', 'ch@ngem3d', 'm', 'TX', 1974);")
 
         describe_out, err = self.run_cqlsh(node1, 'DESCRIBE MATERIALIZED VIEW test.users_by_state')
-        self.assertEqual(0, len(err), err)
+        describe_out_str = describe_out.decode("utf-8")
+        err_str = err.decode("utf-8")
+        assert 0 == len(err_str), err_str
 
         select_out, err = self.run_cqlsh(node1, "SELECT * FROM test.users_by_state")
-        self.assertEqual(0, len(err), err)
-        debug(select_out)
+        err_str = err.decode("utf-8")
+        assert 0 == len(err_str), err_str
+        logger.debug(select_out)
 
         out, err = self.run_cqlsh(node1, "DROP MATERIALIZED VIEW test.users_by_state; DESCRIBE KEYSPACE test; DESCRIBE table test.users")
-        self.assertEqual(0, len(err), err)
-        self.assertNotIn("CREATE MATERIALIZED VIEW users_by_state", out)
+        err_str = err.decode("utf-8")
+        assert 0 == len(err_str), err_str
+        assert "CREATE MATERIALIZED VIEW users_by_state" not in out
 
         out, err = self.run_cqlsh(node1, 'DESCRIBE MATERIALIZED VIEW test.users_by_state')
-        self.assertEqual(0, len(out.strip()), out)
-        self.assertIn("Materialized view 'users_by_state' not found", err)
+        out_str = out.decode("utf-8")
+        err_str = err.decode("utf-8")
+        assert 0 == len(out_str.strip()), out_str
+        assert "Materialized view 'users_by_state' not found" in err_str
 
-        create_statement = 'USE test; ' + ' '.join(describe_out.splitlines()).strip()[:-1]
+        create_statement = 'USE test; ' + ' '.join(describe_out_str.splitlines()).strip()[:-1]
         out, err = self.run_cqlsh(node1, create_statement)
-        self.assertEqual(0, len(err), err)
+        err_str = err.decode("utf-8")
+        assert 0 == len(err_str), err_str
 
         reloaded_describe_out, err = self.run_cqlsh(node1, 'DESCRIBE MATERIALIZED VIEW test.users_by_state')
-        self.assertEqual(0, len(err), err)
-        self.assertEqual(describe_out, reloaded_describe_out)
+        err_str = err.decode("utf-8")
+        assert 0 == len(err_str), err_str
+        assert describe_out_str == reloaded_describe_out.decode("utf-8")
 
         reloaded_select_out, err = self.run_cqlsh(node1, "SELECT * FROM test.users_by_state")
-        self.assertEqual(0, len(err), err)
-        self.assertEqual(select_out, reloaded_select_out)
+        err_str = err.decode("utf-8")
+        assert 0 == len(err_str), err_str
+        assert select_out == reloaded_select_out
 
     @since('3.0')
     def test_clear(self):
@@ -1615,11 +1625,11 @@ Tracing session:""")
         node1, = self.cluster.nodelist()
 
         out, err = self.run_cqlsh(node1, cmd, env_vars={'TERM': 'xterm'})
-        self.assertEqual("", err)
+        assert "" == err
 
         # Can't check escape sequence on cmd prompt. Assume no errors is good enough metric.
         if not common.is_win():
-            self.assertTrue(re.search(chr(27) + "\[[0,1,2]?J", out))
+            assert re.search(chr(27) + r"\[[0,1,2]?J", out)
 
     def test_batch(self):
         """
@@ -1635,8 +1645,8 @@ Tracing session:""")
             CREATE TABLE excelsior.data (id int primary key);
             BEGIN BATCH INSERT INTO excelsior.data (id) VALUES (0); APPLY BATCH""")
 
-        self.assertEqual(0, len(stderr), stderr)
-        self.assertEqual(0, len(stdout), stdout)
+        assert 0 == len(stderr), stderr
+        assert 0 == len(stdout), stdout
 
     def run_cqlsh(self, node, cmds, cqlsh_options=None, env_vars=None):
         if env_vars is None:
@@ -1663,13 +1673,13 @@ Tracing session:""")
         return p.communicate()
 
 
-class CqlshSmokeTest(Tester):
+class TestCqlshSmoke(Tester):
     """
     Tests simple use cases for cqlsh.
     """
 
-    def setUp(self):
-        super(CqlshSmokeTest, self).setUp()
+    @pytest.fixture(scope='function', autouse=True)
+    def fixture_cluster_setup(self, fixture_dtest_setup):
         self.cluster.populate(1).start(wait_for_binary_proto=True)
         [self.node1] = self.cluster.nodelist()
         self.session = self.patient_cql_connection(self.node1)
@@ -1682,22 +1692,22 @@ class CqlshSmokeTest(Tester):
         create_cf(self.session, 'test', key_type='uuid', columns={'i': 'int'})
 
         out, err, _ = self.node1.run_cqlsh("INSERT INTO ks.test (key) VALUES (uuid())")
-        self.assertEqual(err, "")
+        assert err == ""
 
         result = list(self.session.execute("SELECT key FROM ks.test"))
-        self.assertEqual(len(result), 1)
-        self.assertEqual(len(result[0]), 1)
-        self.assertIsInstance(result[0][0], UUID)
+        assert len(result) == 1
+        assert len(result[0]) == 1
+        assert isinstance(result[0][0], UUID)
 
         out, err, _ = self.node1.run_cqlsh("INSERT INTO ks.test (key) VALUES (uuid())")
-        self.assertEqual(err, "")
+        assert err == ""
 
         result = list(self.session.execute("SELECT key FROM ks.test"))
-        self.assertEqual(len(result), 2)
-        self.assertEqual(len(result[0]), 1)
-        self.assertEqual(len(result[1]), 1)
-        self.assertIsInstance(result[0][0], UUID)
-        self.assertIsInstance(result[1][0], UUID)
+        assert len(result) == 2
+        assert len(result[0]) == 1
+        assert len(result[1]) == 1
+        assert isinstance(result[0][0], UUID)
+        assert isinstance(result[1][0], UUID)
-        self.assertNotEqual(result[0][0], result[1][0])
+        assert result[0][0] != result[1][0]
 
     def test_commented_lines(self):
@@ -1713,8 +1723,8 @@ class CqlshSmokeTest(Tester):
              * comment */
             """)
         out, err, _ = self.node1.run_cqlsh("DESCRIBE KEYSPACE ks; // post-line comment")
-        self.assertEqual(err, "")
-        self.assertTrue(out.strip().startswith("CREATE KEYSPACE ks"))
+        assert err == ""
+        assert out.strip().startswith("CREATE KEYSPACE ks")
 
     def test_colons_in_string_literals(self):
         create_ks(self.session, 'ks', 1)
@@ -1725,7 +1735,7 @@ class CqlshSmokeTest(Tester):
             INSERT INTO ks.test (key) VALUES ('Cassandra:TheMovie');
             """)
         assert_all(self.session, "SELECT key FROM test",
-                   [[u'Cassandra:TheMovie']])
+                   [['Cassandra:TheMovie']])
 
     def test_select(self):
         create_ks(self.session, 'ks', 1)
@@ -1733,31 +1743,31 @@ class CqlshSmokeTest(Tester):
 
         self.session.execute("INSERT INTO ks.test (key, c, v) VALUES ('a', 'a', 'a')")
         assert_all(self.session, "SELECT key, c, v FROM test",
-                   [[u'a', u'a', u'a']])
+                   [['a', 'a', 'a']])
 
         out, err, _ = self.node1.run_cqlsh("SELECT key, c, v FROM ks.test")
         out_lines = [x.strip() for x in out.split("\n")]
 
         # there should be only 1 row returned & it should contain the inserted values
-        self.assertIn("(1 rows)", out_lines)
-        self.assertIn("a | a | a", out_lines)
-        self.assertEqual(err, '')
+        assert "(1 rows)" in out_lines
+        assert "a | a | a" in out_lines
+        assert err == ''
 
     def test_insert(self):
         create_ks(self.session, 'ks', 1)
         create_cf(self.session, 'test')
 
         self.node1.run_cqlsh("INSERT INTO ks.test (key, c, v) VALUES ('a', 'a', 'a')")
-        assert_all(self.session, "SELECT key, c, v FROM test", [[u"a", u"a", u"a"]])
+        assert_all(self.session, "SELECT key, c, v FROM test", [["a", "a", "a"]])
 
     def test_update(self):
         create_ks(self.session, 'ks', 1)
         create_cf(self.session, 'test')
 
         self.session.execute("INSERT INTO test (key, c, v) VALUES ('a', 'a', 'a')")
-        assert_all(self.session, "SELECT key, c, v FROM test", [[u"a", u"a", u"a"]])
+        assert_all(self.session, "SELECT key, c, v FROM test", [["a", "a", "a"]])
         self.node1.run_cqlsh("UPDATE ks.test SET v = 'b' WHERE key = 'a' AND c = 'a'")
-        assert_all(self.session, "SELECT key, c, v FROM test", [[u"a", u"a", u"b"]])
+        assert_all(self.session, "SELECT key, c, v FROM test", [["a", "a", "b"]])
 
     def test_delete(self):
         create_ks(self.session, 'ks', 1)
@@ -1769,12 +1779,12 @@ class CqlshSmokeTest(Tester):
         self.session.execute("INSERT INTO test (key) VALUES ('d')")
         self.session.execute("INSERT INTO test (key) VALUES ('e')")
         assert_all(self.session, 'SELECT key from test',
-                   [[u'a'], [u'c'], [u'e'], [u'd'], [u'b']])
+                   [['a'], ['c'], ['e'], ['d'], ['b']])
 
         self.node1.run_cqlsh("DELETE FROM ks.test WHERE key = 'c'")
 
         assert_all(self.session, 'SELECT key from test',
-                   [[u'a'], [u'e'], [u'd'], [u'b']])
+                   [['a'], ['e'], ['d'], ['b']])
 
     def test_batch(self):
         create_ks(self.session, 'ks', 1)
@@ -1790,27 +1800,27 @@ class CqlshSmokeTest(Tester):
             ''')
         # make sure everything inserted is actually there
         assert_all(self.session, 'SELECT key FROM ks.test',
-                   [[u'eggs'], [u'spam'], [u'sausage']])
+                   [['eggs'], ['spam'], ['sausage']])
 
     def test_create_keyspace(self):
-        self.assertNotIn(u'created', self.get_keyspace_names())
+        assert 'created' not in self.get_keyspace_names()
 
         self.node1.run_cqlsh("CREATE KEYSPACE created WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }")
-        self.assertIn(u'created', self.get_keyspace_names())
+        assert 'created' in self.get_keyspace_names()
 
     def test_drop_keyspace(self):
         create_ks(self.session, 'ks', 1)
-        self.assertIn(u'ks', self.get_keyspace_names())
+        assert 'ks' in self.get_keyspace_names()
 
         self.node1.run_cqlsh('DROP KEYSPACE ks')
 
-        self.assertNotIn(u'ks', self.get_keyspace_names())
+        assert 'ks' not in self.get_keyspace_names()
 
     def test_create_table(self):
         create_ks(self.session, 'ks', 1)
 
         self.node1.run_cqlsh('CREATE TABLE ks.test (i int PRIMARY KEY);')
-        self.assertEquals(self.get_tables_in_keyspace('ks'), [u'test'])
+        assert self.get_tables_in_keyspace('ks') == ['test']
 
     def test_drop_table(self):
         create_ks(self.session, 'ks', 1)
@@ -1821,7 +1831,7 @@ class CqlshSmokeTest(Tester):
         self.node1.run_cqlsh('DROP TABLE ks.test;')
         self.session.cluster.refresh_schema_metadata()
 
-        self.assertEqual(0, len(self.session.cluster.metadata.keyspaces['ks'].tables))
+        assert 0 == len(self.session.cluster.metadata.keyspaces['ks'].tables)
 
     def test_truncate(self):
         create_ks(self.session, 'ks', 1)
@@ -1833,10 +1843,10 @@ class CqlshSmokeTest(Tester):
         self.session.execute("INSERT INTO test (key) VALUES ('d')")
         self.session.execute("INSERT INTO test (key) VALUES ('e')")
         assert_all(self.session, 'SELECT key from test',
-                   [[u'a'], [u'c'], [u'e'], [u'd'], [u'b']])
+                   [['a'], ['c'], ['e'], ['d'], ['b']])
 
         self.node1.run_cqlsh('TRUNCATE ks.test;')
-        self.assertEqual([], rows_to_list(self.session.execute('SELECT * from test')))
+        assert [] == rows_to_list(self.session.execute('SELECT * from test'))
 
     @since('2.0', max_version='2.2')
     def test_alter_table(self):
@@ -1846,20 +1856,18 @@ class CqlshSmokeTest(Tester):
         def get_ks_columns():
             table = self.session.cluster.metadata.keyspaces['ks'].tables['test']
 
-            return [[table.name, column.name, column.cql_type] for column in table.columns.values()]
+            return [[table.name, column.name, column.cql_type] for column in list(table.columns.values())]
 
-        old_column_spec = [u'test', u'i',
-                           u'ascii']
-        self.assertIn(old_column_spec, get_ks_columns())
+        old_column_spec = ['test', 'i',
+                           'ascii']
+        assert old_column_spec in get_ks_columns()
 
         self.node1.run_cqlsh('ALTER TABLE ks.test ALTER i TYPE text;')
         self.session.cluster.refresh_table_metadata("ks", "test")
 
         new_columns = get_ks_columns()
-        self.assertNotIn(old_column_spec, new_columns)
-        self.assertIn([u'test', u'i',
-                       u'text'],
-                      new_columns)
+        assert old_column_spec not in new_columns
+        assert ['test', 'i', 'text'] in new_columns
 
     def test_use_keyspace(self):
         # ks1 contains ks1table, ks2 contains ks2table
@@ -1873,16 +1881,16 @@ class CqlshSmokeTest(Tester):
             USE ks1;
             DESCRIBE TABLES;
             ''')
-        self.assertEqual([x for x in ks1_stdout.split() if x], ['ks1table'])
-        self.assertEqual(ks1_stderr, '')
+        assert [x for x in ks1_stdout.split() if x] == ['ks1table']
+        assert ks1_stderr == ''
 
         ks2_stdout, ks2_stderr, _ = self.node1.run_cqlsh(
             '''
             USE ks2;
             DESCRIBE TABLES;
             ''')
-        self.assertEqual([x for x in ks2_stdout.split() if x], ['ks2table'])
-        self.assertEqual(ks2_stderr, '')
+        assert [x for x in ks2_stdout.split() if x] == ['ks2table']
+        assert ks2_stderr == ''
 
     # DROP INDEX statement fails in 2.0 (see CASSANDRA-9247)
     def test_drop_index(self):
@@ -1896,7 +1904,7 @@ class CqlshSmokeTest(Tester):
             return self.session.execute(requires_index)
 
         # make sure it fails as expected
-        self.assertRaises(InvalidRequest, execute_requires_index)
+        pytest.raises(InvalidRequest, execute_requires_index)
 
         # make sure it doesn't fail when an index exists
         self.session.execute('CREATE INDEX index_to_drop ON test (i);')
@@ -1904,7 +1912,7 @@ class CqlshSmokeTest(Tester):
 
         # drop the index via cqlsh, then make sure it fails
         self.node1.run_cqlsh('DROP INDEX ks.index_to_drop;')
-        self.assertRaises(InvalidRequest, execute_requires_index)
+        pytest.raises(InvalidRequest, execute_requires_index)
 
     # DROP INDEX statement fails in 2.0 (see CASSANDRA-9247)
     def test_create_index(self):
@@ -1918,7 +1926,7 @@ class CqlshSmokeTest(Tester):
             return self.session.execute(requires_index)
 
         # make sure it fails as expected
-        self.assertRaises(InvalidRequest, execute_requires_index)
+        pytest.raises(InvalidRequest, execute_requires_index)
 
         # make sure index exists after creating via cqlsh
         self.node1.run_cqlsh('CREATE INDEX index_to_drop ON ks.test (i);')
@@ -1926,15 +1934,15 @@ class CqlshSmokeTest(Tester):
 
         # drop the index, then make sure it fails again
         self.session.execute('DROP INDEX ks.index_to_drop;')
-        self.assertRaises(InvalidRequest, execute_requires_index)
+        pytest.raises(InvalidRequest, execute_requires_index)
 
     def get_keyspace_names(self):
         self.session.cluster.refresh_schema_metadata()
-        return [ks.name for ks in self.session.cluster.metadata.keyspaces.values()]
+        return [ks.name for ks in list(self.session.cluster.metadata.keyspaces.values())]
 
     def get_tables_in_keyspace(self, keyspace):
         self.session.cluster.refresh_schema_metadata()
-        return [table.name for table in self.session.cluster.metadata.keyspaces[keyspace].tables.values()]
+        return [table.name for table in list(self.session.cluster.metadata.keyspaces[keyspace].tables.values())]
 
 
 class CqlLoginTest(Tester):
@@ -1942,12 +1950,13 @@ class CqlLoginTest(Tester):
     Tests login which requires password authenticator
     """
 
-    def setUp(self):
-        super(CqlLoginTest, self).setUp()
+    @pytest.fixture(scope='function', autouse=True)
+    def fixture_cluster_setup(self, fixture_dtest_setup):
+        cluster = fixture_dtest_setup.cluster
         config = {'authenticator': 'org.apache.cassandra.auth.PasswordAuthenticator'}
-        self.cluster.set_configuration_options(values=config)
-        self.cluster.populate(1).start(wait_for_binary_proto=True)
-        [self.node1] = self.cluster.nodelist()
+        cluster.set_configuration_options(values=config)
+        cluster.populate(1).start(wait_for_binary_proto=True)
+        [self.node1] = cluster.nodelist()
         self.node1.watch_log_for('Created default superuser')
         self.session = self.patient_cql_connection(self.node1, user='cassandra', password='cassandra')
 
@@ -1956,7 +1965,7 @@ class CqlLoginTest(Tester):
                    if self.cluster.version() >= LooseVersion('3.10')
                    else "Username and/or password are incorrect")
 
-        self.assertEqual([message in x for x in input.split("\n") if x], [True])
+        assert [message in x for x in input.split("\n") if x] == [True]
 
     def test_login_keeps_keyspace(self):
         create_ks(self.session, 'ks1', 1)
@@ -1971,8 +1980,8 @@ class CqlLoginTest(Tester):
             DESCRIBE TABLES;
             ''',
             cqlsh_options=['-u', 'cassandra', '-p', 'cassandra'])
-        self.assertEqual([x for x in cqlsh_stdout.split() if x], ['ks1table', 'ks1table'])
-        self.assertEqual(cqlsh_stderr, '')
+        assert [x for x in cqlsh_stdout.split() if x] == ['ks1table', 'ks1table']
+        assert cqlsh_stderr == ''
 
     def test_login_rejects_bad_pass(self):
         create_ks(self.session, 'ks1', 1)
@@ -2009,7 +2018,7 @@ class CqlLoginTest(Tester):
             query,
             cqlsh_options=['-u', 'cassandra', '-p', 'cassandra'])
 
-        err_lines = cqlsh_stderr.splitlines()
+        err_lines = str(cqlsh_stderr).splitlines()
         for err_line in err_lines:
             if expected_error in err_line:
                 break
@@ -2030,7 +2039,7 @@ class CqlLoginTest(Tester):
             DESCRIBE TABLES;
             ''',
             cqlsh_options=['-u', 'cassandra', '-p', 'cassandra'])
-        self.assertEqual([x for x in cqlsh_stdout.split() if x], ['ks1table'])
+        assert [x for x in cqlsh_stdout.split() if x] == ['ks1table']
         self.assert_login_not_allowed('user1', cqlsh_stderr)
 
     @since('2.2')
@@ -2047,5 +2056,5 @@ class CqlLoginTest(Tester):
             LIST ROLES;
             ''',
             cqlsh_options=['-u', 'cassandra', '-p', 'cassandra'])
-        self.assertTrue('super' in out)
-        self.assertEqual('', err)
+        assert 'super' in out
+        assert '' == err

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/cqlsh_tests/cqlsh_tools.py
----------------------------------------------------------------------
diff --git a/cqlsh_tests/cqlsh_tools.py b/cqlsh_tests/cqlsh_tools.py
index 6a592a0..d7aca97 100644
--- a/cqlsh_tests/cqlsh_tools.py
+++ b/cqlsh_tests/cqlsh_tools.py
@@ -2,7 +2,6 @@ import csv
 import random
 
 import cassandra
-from nose.tools import assert_items_equal
 
 
 class DummyColorMap(object):
@@ -25,7 +24,7 @@ def csv_rows(filename, delimiter=None):
 
 def assert_csvs_items_equal(filename1, filename2):
     with open(filename1, 'r') as x, open(filename2, 'r') as y:
-        assert_items_equal(list(x.readlines()), list(y.readlines()))
+        assert list(x.readlines()) == list(y.readlines())
 
 
 def random_list(gen=None, n=None):
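
One caveat on dropping nose's assert_items_equal above: that helper
compared collections regardless of order, while plain list equality is
order-sensitive, so the rewritten assert_csvs_items_equal now also checks
row order. Where the old semantics are wanted, sorting both sides first
preserves them; a minimal sketch:

    def assert_csvs_items_equal_unordered(filename1, filename2):
        # sorted() keeps the order-insensitive behaviour of nose's
        # assert_items_equal under a plain pytest assert
        with open(filename1, 'r') as x, open(filename2, 'r') as y:
            assert sorted(x.readlines()) == sorted(y.readlines())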

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/delete_insert_test.py
----------------------------------------------------------------------
diff --git a/delete_insert_test.py b/delete_insert_test.py
index efc5927..e0c4bbc 100644
--- a/delete_insert_test.py
+++ b/delete_insert_test.py
@@ -1,21 +1,22 @@
 import random
 import threading
 import uuid
+import logging
 
 from cassandra import ConsistencyLevel
 from cassandra.query import SimpleStatement
-from nose.tools import assert_equal
 
 from dtest import Tester, create_ks
 
+logger = logging.getLogger(__name__)
 
-class DeleteInsertTest(Tester):
+
+class TestDeleteInsert(Tester):
     """
     Examines scenarios around deleting data and adding data back with the same key
     """
     # Generate 1000 rows in memory so we can re-use the same ones over again:
-    groups = ['group1', 'group2', 'group3', 'group4']
-    rows = [(str(uuid.uuid1()), x, random.choice(groups)) for x in range(1000)]
+    rows = [(str(uuid.uuid1()), x, random.choice(['group1', 'group2', 'group3', 'group4'])) for x in range(1000)]
 
     def create_ddl(self, session, rf={'dc1': 2, 'dc2': 2}):
         create_ks(session, 'delete_insert_search_test', rf)
@@ -36,7 +37,7 @@ class DeleteInsertTest(Tester):
         for row in rows:
             session.execute("INSERT INTO test (id, val1, group) VALUES (%s, '%s', '%s')" % row)
 
-    def delete_insert_search_test(self):
+    def test_delete_insert_search(self):
         cluster = self.cluster
         cluster.populate([2, 2]).start(wait_for_binary_proto=True)
         node1 = cluster.nodelist()[0]
@@ -63,9 +64,11 @@ class DeleteInsertTest(Tester):
 
             def run(self):
                 session = self.connection
-                query = SimpleStatement("SELECT * FROM delete_insert_search_test.test WHERE group = 'group2'", consistency_level=ConsistencyLevel.LOCAL_QUORUM)
+                query = SimpleStatement("SELECT * FROM delete_insert_search_test.test WHERE group = 'group2'",
+                                        consistency_level=ConsistencyLevel.LOCAL_QUORUM)
                 rows = session.execute(query)
-                assert_equal(len(list(rows)), len(deleted), "Expecting the length of {} to be equal to the length of {}.".format(list(rows), deleted))
+                assert len(list(rows)) == len(deleted), "Expecting the length of {} to be equal to the " \
+                                                        "length of {}.".format(list(rows), deleted)
 
         threads = []
         for x in range(20):
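
One more note on the reader threads above: an AssertionError raised inside
Thread.run() does not fail the pytest test by itself, it only prints a
traceback on that thread. A sketch of one way to surface such failures,
assuming the test joins its threads before finishing (names are
illustrative):

    import threading

    class CheckedThread(threading.Thread):
        """Runs a callable and re-raises its exception on join_and_check()."""

        def __init__(self, target):
            super().__init__()
            self.target = target
            self.exc = None

        def run(self):
            try:
                self.target()
            except BaseException as e:
                # stash the failure so the main thread can see it
                self.exc = e

        def join_and_check(self):
            self.join()
            if self.exc is not None:
                raise self.exc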

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/deletion_test.py
----------------------------------------------------------------------
diff --git a/deletion_test.py b/deletion_test.py
index d4258ad..9c832b3 100644
--- a/deletion_test.py
+++ b/deletion_test.py
@@ -1,14 +1,17 @@
 import time
+import logging
 
 from dtest import Tester, create_ks, create_cf
 from tools.data import rows_to_list
 from tools.jmxutils import (JolokiaAgent, make_mbean,
                             remove_perf_disable_shared_mem)
 
+logger = logging.getLogger(__name__)
+
 
 class TestDeletion(Tester):
 
-    def gc_test(self):
+    def test_gc(self):
         """
         Test that tombstone purging doesn't bring back deleted data by writing
         2 rows to a table with gc_grace=0, deleting one of those rows, then
@@ -29,23 +32,20 @@ class TestDeletion(Tester):
         session.execute('insert into cf (key, c1) values (2,1)')
         node1.flush()
 
-        self.assertEqual(rows_to_list(session.execute('select * from cf;')),
-                         [[1, 1], [2, 1]])
+        assert rows_to_list(session.execute('select * from cf;')) == [[1, 1], [2, 1]]
 
         session.execute('delete from cf where key=1')
 
-        self.assertEqual(rows_to_list(session.execute('select * from cf;')),
-                         [[2, 1]])
+        assert rows_to_list(session.execute('select * from cf;')) == [[2, 1]]
 
         node1.flush()
         time.sleep(.5)
         node1.compact()
         time.sleep(.5)
 
-        self.assertEqual(rows_to_list(session.execute('select * from cf;')),
-                         [[2, 1]])
+        assert rows_to_list(session.execute('select * from cf;')) == [[2, 1]]
 
-    def tombstone_size_test(self):
+    def test_tombstone_size(self):
         self.cluster.populate(1)
         node1 = self.cluster.nodelist()[0]
 
@@ -61,8 +61,8 @@ class TestDeletion(Tester):
         for i in range(100):
             session.execute(stmt, [i])
 
-        self.assertEqual(memtable_count(node1, 'ks', 'test'), 100)
-        self.assertGreater(memtable_size(node1, 'ks', 'test'), 0)
+        assert memtable_count(node1, 'ks', 'test') == 100
+        assert memtable_size(node1, 'ks', 'test') > 0
 
 
 def memtable_size(node, keyspace, table):
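
The memtable_size/memtable_count helpers used by test_tombstone_size read
table metrics over JMX. A sketch of the access pattern, assuming the
JolokiaAgent and make_mbean helpers behave as they do elsewhere in this
patch (the metric name shown is illustrative):

    from tools.jmxutils import JolokiaAgent, make_mbean

    def table_metric(node, keyspace, table, name):
        # e.g. name='MemtableColumnsCount' for the row count check above
        mbean = make_mbean('metrics', type='Table', keyspace=keyspace,
                           scope=table, name=name)
        with JolokiaAgent(node) as jmx:
            return jmx.read_attribute(mbean, 'Value')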

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/disk_balance_test.py
----------------------------------------------------------------------
diff --git a/disk_balance_test.py b/disk_balance_test.py
index 9eed377..a32ced9 100644
--- a/disk_balance_test.py
+++ b/disk_balance_test.py
@@ -2,16 +2,21 @@ import os
 import os.path
 import re
 
+import pytest
+import logging
+
 from ccmlib.node import Node
-from dtest import DISABLE_VNODES, Tester, create_ks, debug
+from dtest import Tester, create_ks
 from tools.assertions import assert_almost_equal
 from tools.data import create_c1c2_table, insert_c1c2, query_c1c2
-from tools.decorators import since
 from tools.jmxutils import (JolokiaAgent, make_mbean,
                             remove_perf_disable_shared_mem)
 from tools.misc import new_node
 from compaction_test import grep_sstables_in_each_level
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
 
 @since('3.2')
 class TestDiskBalance(Tester):
@@ -19,62 +24,63 @@ class TestDiskBalance(Tester):
     @jira_ticket CASSANDRA-6696
     """
 
-    def disk_balance_stress_test(self):
+    def test_disk_balance_stress(self):
         cluster = self.cluster
-        if not DISABLE_VNODES:
+        if self.dtest_config.use_vnodes:
             cluster.set_configuration_options(values={'num_tokens': 256})
         cluster.populate(4).start(wait_for_binary_proto=True)
         node1 = cluster.nodes['node1']
 
-        node1.stress(['write', 'n=50k', 'no-warmup', '-rate', 'threads=100', '-schema', 'replication(factor=3)', 'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)'])
+        node1.stress(['write', 'n=50k', 'no-warmup', '-rate', 'threads=100', '-schema', 'replication(factor=3)',
+                      'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)'])
         cluster.flush()
         # make sure the data directories are balanced:
         for node in cluster.nodelist():
             self.assert_balanced(node)
 
-    def disk_balance_bootstrap_test(self):
+    def test_disk_balance_bootstrap(self):
         cluster = self.cluster
-        if not DISABLE_VNODES:
+        if self.dtest_config.use_vnodes:
             cluster.set_configuration_options(values={'num_tokens': 256})
         # apparently we have legitimate errors in the log when bootstrapping (see bootstrap_test.py)
-        self.allow_log_errors = True
+        self.fixture_dtest_setup.allow_log_errors = True
         cluster.populate(4).start(wait_for_binary_proto=True)
         node1 = cluster.nodes['node1']
 
-        node1.stress(['write', 'n=50k', 'no-warmup', '-rate', 'threads=100', '-schema', 'replication(factor=3)', 'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)'])
+        node1.stress(['write', 'n=50k', 'no-warmup', '-rate', 'threads=100', '-schema', 'replication(factor=3)',
+                      'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)'])
         cluster.flush()
         node5 = new_node(cluster)
         node5.start(wait_for_binary_proto=True)
         self.assert_balanced(node5)
 
-
-    def disk_balance_replace_same_address_test(self):
+    def test_disk_balance_replace_same_address(self):
         self._test_disk_balance_replace(same_address=True)
 
-    def disk_balance_replace_different_address_test(self):
+    def test_disk_balance_replace_different_address(self):
         self._test_disk_balance_replace(same_address=False)
 
     def _test_disk_balance_replace(self, same_address):
-        debug("Creating cluster")
+        logger.debug("Creating cluster")
         cluster = self.cluster
-        if not DISABLE_VNODES:
+        if self.dtest_config.use_vnodes:
             cluster.set_configuration_options(values={'num_tokens': 256})
         # apparently we have legitimate errors in the log when bootstrapping (see bootstrap_test.py)
-        self.allow_log_errors = True
+        self.fixture_dtest_setup.allow_log_errors = True
         cluster.populate(4).start(wait_for_binary_proto=True)
         node1 = cluster.nodes['node1']
 
-        debug("Populating")
+        logger.debug("Populating")
         node1.stress(['write', 'n=50k', 'no-warmup', '-rate', 'threads=100', '-schema', 'replication(factor=3)', 'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)'])
         cluster.flush()
 
-        debug("Stopping and removing node2")
+        logger.debug("Stopping and removing node2")
         node2 = cluster.nodes['node2']
         node2.stop(gently=False)
         self.cluster.remove(node2)
 
         node5_address = node2.address() if same_address else '127.0.0.5'
-        debug("Starting replacement node")
+        logger.debug("Starting replacement node")
         node5 = Node('node5', cluster=self.cluster, auto_bootstrap=True,
                      thrift_interface=None, storage_interface=(node5_address, 7000),
                      jmx_port='7500', remote_debug_port='0', initial_token=None,
@@ -84,17 +90,18 @@ class TestDiskBalance(Tester):
                     wait_for_binary_proto=True,
                     wait_other_notice=True)
 
-        debug("Checking replacement node is balanced")
+        logger.debug("Checking replacement node is balanced")
         self.assert_balanced(node5)
 
-    def disk_balance_decommission_test(self):
+    def test_disk_balance_decommission(self):
         cluster = self.cluster
-        if not DISABLE_VNODES:
+        if self.dtest_config.use_vnodes:
             cluster.set_configuration_options(values={'num_tokens': 256})
         cluster.populate(4).start(wait_for_binary_proto=True)
         node1 = cluster.nodes['node1']
         node4 = cluster.nodes['node4']
-        node1.stress(['write', 'n=50k', 'no-warmup', '-rate', 'threads=100', '-schema', 'replication(factor=2)', 'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)'])
+        node1.stress(['write', 'n=50k', 'no-warmup', '-rate', 'threads=100', '-schema', 'replication(factor=2)',
+                      'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)'])
         cluster.flush()
 
         node4.decommission()
@@ -105,7 +112,7 @@ class TestDiskBalance(Tester):
         for node in cluster.nodelist():
             self.assert_balanced(node)
 
-    def blacklisted_directory_test(self):
+    def test_blacklisted_directory(self):
         cluster = self.cluster
         cluster.set_datadir_count(3)
         cluster.populate(1)
@@ -118,7 +125,7 @@ class TestDiskBalance(Tester):
         create_c1c2_table(self, session)
         insert_c1c2(session, n=10000)
         node.flush()
-        for k in xrange(0, 10000):
+        for k in range(0, 10000):
             query_c1c2(session, k)
 
         node.compact()
@@ -126,17 +133,17 @@ class TestDiskBalance(Tester):
         with JolokiaAgent(node) as jmx:
             jmx.execute_method(mbean, 'markUnwritable', [os.path.join(node.get_path(), 'data0')])
 
-        for k in xrange(0, 10000):
+        for k in range(0, 10000):
             query_c1c2(session, k)
 
         node.nodetool('relocatesstables')
 
-        for k in xrange(0, 10000):
+        for k in range(0, 10000):
             query_c1c2(session, k)
 
-    def alter_replication_factor_test(self):
+    def test_alter_replication_factor(self):
         cluster = self.cluster
-        if not DISABLE_VNODES:
+        if self.dtest_config.use_vnodes:
             cluster.set_configuration_options(values={'num_tokens': 256})
         cluster.populate(3).start(wait_for_binary_proto=True)
         node1 = cluster.nodes['node1']
@@ -159,14 +166,14 @@ class TestDiskBalance(Tester):
         assert_almost_equal(*sums, error=0.1, error_message=node.name)
 
     @since('3.10')
-    def disk_balance_after_boundary_change_stcs_test(self):
+    def test_disk_balance_after_boundary_change_stcs(self):
         """
         @jira_ticket CASSANDRA-13948
         """
         self._disk_balance_after_boundary_change_test(lcs=False)
 
     @since('3.10')
-    def disk_balance_after_boundary_change_lcs_test(self):
+    def test_disk_balance_after_boundary_change_lcs(self):
         """
         @jira_ticket CASSANDRA-13948
         """
@@ -184,13 +191,13 @@ class TestDiskBalance(Tester):
         """
 
         cluster = self.cluster
-        if not DISABLE_VNODES:
+        if self.dtest_config.use_vnodes:
             cluster.set_configuration_options(values={'num_tokens': 1024})
         num_disks = 5
         cluster.set_datadir_count(num_disks)
         cluster.set_configuration_options(values={'concurrent_compactors': num_disks})
 
-        debug("Starting node1 with {} data dirs and concurrent_compactors".format(num_disks))
+        logger.debug("Starting node1 with {} data dirs and concurrent_compactors".format(num_disks))
         cluster.populate(1).start(wait_for_binary_proto=True)
         [node1] = cluster.nodelist()
 
@@ -204,13 +211,13 @@ class TestDiskBalance(Tester):
         keys_to_write = num_flushes * keys_per_flush
 
         compaction_opts = "LeveledCompactionStrategy,sstable_size_in_mb=1" if lcs else "SizeTieredCompactionStrategy"
-        debug("Writing {} keys in {} flushes (compaction_opts={})".format(keys_to_write, num_flushes, compaction_opts))
+        logger.debug("Writing {} keys in {} flushes (compaction_opts={})".format(keys_to_write, num_flushes, compaction_opts))
         total_keys = num_flushes * keys_per_flush
         current_keys = 0
         while current_keys < total_keys:
             start_key = current_keys + 1
             end_key = current_keys + keys_per_flush
-            debug("Writing keys {}..{} and flushing".format(start_key, end_key))
+            logger.debug("Writing keys {}..{} and flushing".format(start_key, end_key))
             node1.stress(['write', 'n={}'.format(keys_per_flush), "no-warmup", "cl=ALL", "-pop",
                           "seq={}..{}".format(start_key, end_key), "-rate", "threads=1", "-schema", "replication(factor=1)",
                           "compaction(strategy={},enabled=false)".format(compaction_opts)])
@@ -218,28 +225,28 @@ class TestDiskBalance(Tester):
             current_keys = end_key
 
         # Add a new node, so disk boundaries will change
-        debug("Bootstrap node2 and flush")
+        logger.debug("Bootstrap node2 and flush")
         node2 = new_node(cluster, bootstrap=True)
         node2.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds=10"], set_migration_task=False)
         node2.flush()
 
         self._assert_balanced_after_boundary_change(node1, total_keys, lcs)
 
-        debug("Decommissioning node1")
+        logger.debug("Decommissioning node1")
         node1.decommission()
         node1.stop()
 
         self._assert_balanced_after_boundary_change(node2, total_keys, lcs)
 
     @since('3.10')
-    def disk_balance_after_joining_ring_stcs_test(self):
+    def test_disk_balance_after_joining_ring_stcs(self):
         """
         @jira_ticket CASSANDRA-13948
         """
         self._disk_balance_after_joining_ring_test(lcs=False)
 
     @since('3.10')
-    def disk_balance_after_joining_ring_lcs_test(self):
+    def test_disk_balance_after_joining_ring_lcs(self):
         """
         @jira_ticket CASSANDRA-13948
         """
@@ -257,13 +264,13 @@ class TestDiskBalance(Tester):
         """
 
         cluster = self.cluster
-        if not DISABLE_VNODES:
+        if self.dtest_config.use_vnodes:
             cluster.set_configuration_options(values={'num_tokens': 1024})
         num_disks = 5
         cluster.set_datadir_count(num_disks)
         cluster.set_configuration_options(values={'concurrent_compactors': num_disks})
 
-        debug("Starting 3 nodes with {} data dirs and concurrent_compactors".format(num_disks))
+        logger.debug("Starting 3 nodes with {} data dirs and concurrent_compactors".format(num_disks))
         cluster.populate(3).start(wait_for_binary_proto=True)
         node1 = cluster.nodelist()[0]
 
@@ -272,63 +279,63 @@ class TestDiskBalance(Tester):
         keys_to_write = num_flushes * keys_per_flush
 
         compaction_opts = "LeveledCompactionStrategy,sstable_size_in_mb=1" if lcs else "SizeTieredCompactionStrategy"
-        debug("Writing {} keys in {} flushes (compaction_opts={})".format(keys_to_write, num_flushes, compaction_opts))
+        logger.debug("Writing {} keys in {} flushes (compaction_opts={})".format(keys_to_write, num_flushes, compaction_opts))
         total_keys = num_flushes * keys_per_flush
         current_keys = 0
         while current_keys < total_keys:
             start_key = current_keys + 1
             end_key = current_keys + keys_per_flush
-            debug("Writing keys {}..{} and flushing".format(start_key, end_key))
+            logger.debug("Writing keys {}..{} and flushing".format(start_key, end_key))
             node1.stress(['write', 'n={}'.format(keys_per_flush), "no-warmup", "cl=ALL", "-pop",
                           "seq={}..{}".format(start_key, end_key), "-rate", "threads=1", "-schema", "replication(factor=1)",
                           "compaction(strategy={},enabled=false)".format(compaction_opts)])
             node1.nodetool('flush keyspace1 standard1')
             current_keys = end_key
 
-        debug("Stopping node1")
+        logger.debug("Stopping node1")
         node1.stop()
 
-        debug("Starting node1 without joining ring")
+        logger.debug("Starting node1 without joining ring")
         node1.start(wait_for_binary_proto=True, wait_other_notice=False, join_ring=False,
                     jvm_args=["-Dcassandra.load_ring_state=false", "-Dcassandra.write_survey=true"])
 
-        debug("Joining node1 to the ring")
+        logger.debug("Joining node1 to the ring")
         node1.nodetool("join")
         node1.nodetool("join")  # Need to run join twice - one to join ring, another to leave write survey mode
 
         self._assert_balanced_after_boundary_change(node1, total_keys, lcs)
 
     def _assert_balanced_after_boundary_change(self, node, total_keys, lcs):
-        debug("Cleanup {}".format(node.name))
+        logger.debug("Cleanup {}".format(node.name))
         node.cleanup()
 
-        debug("Enabling compactions on {} now that boundaries changed".format(node.name))
+        logger.debug("Enabling compactions on {} now that boundaries changed".format(node.name))
         node.nodetool('enableautocompaction')
 
-        debug("Waiting for compactions on {}".format(node.name))
+        logger.debug("Waiting for compactions on {}".format(node.name))
         node.wait_for_compactions()
 
-        debug("Disabling compactions on {} should not block forever".format(node.name))
+        logger.debug("Disabling compactions on {} should not block forever".format(node.name))
         node.nodetool('disableautocompaction')
 
-        debug("Major compact {} and check disks are balanced".format(node.name))
+        logger.debug("Major compact {} and check disks are balanced".format(node.name))
         node.compact()
 
         node.wait_for_compactions()
         self.assert_balanced(node)
 
-        debug("Reading data back ({} keys)".format(total_keys))
+        logger.debug("Reading data back ({} keys)".format(total_keys))
         node.stress(['read', 'n={}'.format(total_keys), "no-warmup", "cl=ALL", "-pop", "seq=1...{}".format(total_keys), "-rate", "threads=1"])
 
         if lcs:
             output = grep_sstables_in_each_level(node, "standard1")
-            debug("SSTables in each level: {}".format(output))
+            logger.debug("SSTables in each level: {}".format(output))
 
             # [0, ?/, 0, 0, 0, 0...]
             p = re.compile(r'(\d+)(/\d+)?,\s(\d+).*')
             m = p.search(output)
             cs_count = int(m.group(1)) + int(m.group(3))
             sstable_count = len(node.get_sstables('keyspace1', 'standard1'))
-            debug("Checking that compaction strategy sstable # ({}) is equal to actual # ({})".format(cs_count, sstable_count))
-            self.assertEqual(sstable_count, cs_count)
-            self.assertFalse(node.grep_log("is already present on leveled manifest"))
+            logger.debug("Checking that compaction strategy sstable # ({}) is equal to actual # ({})".format(cs_count, sstable_count))
+            assert sstable_count == cs_count
+            assert not node.grep_log("is already present on leveled manifest")
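
The level-count parsing at the end of _assert_balanced_after_boundary_change
is easier to follow with a worked example. The grepped line lists sstable
counts per LCS level, roughly "1/4, 10, 105, 0, 0" (L0 with an optional
"/max" suffix, then L1, L2, ...), and the pattern pulls out the L0 and L1
counts:

    import re

    p = re.compile(r'(\d+)(/\d+)?,\s(\d+).*')
    m = p.search("1/4, 10, 105, 0, 0")
    assert int(m.group(1)) + int(m.group(3)) == 11  # L0 + L1 sstables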




[25/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/materialized_views_test.py
----------------------------------------------------------------------
diff --git a/materialized_views_test.py b/materialized_views_test.py
index 22c69c8..8d38ee8 100644
--- a/materialized_views_test.py
+++ b/materialized_views_test.py
@@ -3,30 +3,34 @@ import re
 import sys
 import time
 import traceback
+import pytest
+import threading
+import logging
+
+from flaky import flaky
+from enum import Enum
+from queue import Empty
 from functools import partial
 from multiprocessing import Process, Queue
-from unittest import skip, skipIf
 
 from cassandra import ConsistencyLevel, InvalidRequest, WriteFailure
 from cassandra.cluster import NoHostAvailable
 from cassandra.concurrent import execute_concurrent_with_args
 from cassandra.cluster import Cluster
 from cassandra.query import SimpleStatement
-# TODO add in requirements.txt
-from enum import Enum  # Remove when switching to py3
-from nose.plugins.attrib import attr
-from nose.tools import (assert_equal)
 
 from distutils.version import LooseVersion
-from dtest import Tester, debug, get_ip_from_node, create_ks, supports_v5_protocol
+from dtest import Tester, get_ip_from_node, create_ks
 from tools.assertions import (assert_all, assert_crc_check_chance_equal,
                               assert_invalid, assert_none, assert_one,
                               assert_unavailable)
 from tools.data import rows_to_list
-from tools.decorators import since
 from tools.misc import new_node
 from tools.jmxutils import (JolokiaAgent, make_mbean, remove_perf_disable_shared_mem)
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
 # CASSANDRA-10978. Migration wait (in seconds) to use in bootstrapping tests. Needed to handle
 # pathological case of flushing schema keyspace for multiple data directories. See CASSANDRA-6696
 # for multiple data directory changes and CASSANDRA-10421 for compaction logging that must be
@@ -34,6 +38,7 @@ from tools.jmxutils import (JolokiaAgent, make_mbean, remove_perf_disable_shared
 MIGRATION_WAIT = 5
 
 
+@flaky
 @since('3.0')
 class TestMaterializedViews(Tester):
     """
@@ -80,7 +85,7 @@ class TestMaterializedViews(Tester):
             self.cluster.compact()
 
     def _settle_nodes(self):
-        debug("Settling all nodes")
+        logger.debug("Settling all nodes")
         stage_match = re.compile("(?P<name>\S+)\s+(?P<active>\d+)\s+(?P<pending>\d+)\s+(?P<completed>\d+)\s+(?P<blocked>\d+)\s+(?P<alltimeblocked>\d+)")
 
         def _settled_stages(node):
@@ -92,7 +97,7 @@ class TestMaterializedViews(Tester):
                     active = int(match.group('active'))
                     pending = int(match.group('pending'))
                     if active != 0 or pending != 0:
-                        debug("%s - pool %s still has %d active and %d pending" % (node.name, match.group("name"), active, pending))
+                        logger.debug("%s - pool %s still has %d active and %d pending" % (node.name, match.group("name"), active, pending))
                         return False
             return True
 
@@ -111,7 +116,7 @@ class TestMaterializedViews(Tester):
             return 'system.views_builds_in_progress'
 
     def _wait_for_view(self, ks, view):
-        debug("waiting for view")
+        logger.debug("waiting for view")
 
         def _view_build_finished(node):
             s = self.patient_exclusive_cql_connection(node)
@@ -137,7 +142,7 @@ class TestMaterializedViews(Tester):
                 query = "SELECT COUNT(*) FROM %s WHERE keyspace_name='%s' AND view_name='%s'" %\
                         (self._build_progress_table(), ks, view)
                 result = list(session.execute(query))
-                self.assertEqual(result[0].count, 0)
+                assert 0 == result[0].count
             except AssertionError:
                 break
 
@@ -157,21 +162,20 @@ class TestMaterializedViews(Tester):
     def _replay_batchlogs(self):
         for node in self.cluster.nodelist():
             if node.is_running():
-                debug("Replaying batchlog on node {}".format(node.name))
+                logger.debug("Replaying batchlog on node {}".format(node.name))
                 node.nodetool("replaybatchlog")
                 # CASSANDRA-13069 - Ensure replayed mutations are removed from the batchlog
                 node_session = self.patient_exclusive_cql_connection(node)
                 result = list(node_session.execute("SELECT count(*) FROM system.batches;"))
-                self.assertEqual(result[0].count, 0)
+                assert result[0].count == 0
 
-    def create_test(self):
+    def test_create(self):
         """Test the materialized view creation"""
-
         session = self.prepare(user_table=True)
 
         result = list(session.execute(("SELECT * FROM system_schema.views "
                                        "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
-        self.assertEqual(len(result), 1, "Expecting 1 materialized view, got" + str(result))
+        assert len(result) == 1, "Expecting 1 materialized view, got " + str(result)
 
     def test_gcgs_validation(self):
         """Verify that it's not possible to create or set a too low gc_grace_seconds on MVs"""
@@ -211,73 +215,69 @@ class TestMaterializedViews(Tester):
                        "updates. Setting gc_grace_seconds too low might cause undelivered updates"
                        " to expire before being replayed.")
 
-    def insert_test(self):
+    def test_insert(self):
         """Test basic insertions"""
-
         session = self.prepare(user_table=True)
 
         self._insert_data(session)
 
         result = list(session.execute("SELECT * FROM users;"))
-        self.assertEqual(len(result), 4, "Expecting {} users, got {}".format(4, len(result)))
+        assert len(result) == 4, "Expecting {} users, got {}".format(4, len(result))
 
         result = list(session.execute("SELECT * FROM users_by_state WHERE state='TX';"))
-        self.assertEqual(len(result), 2, "Expecting {} users, got {}".format(2, len(result)))
+        assert len(result) == 2, "Expecting {} users, got {}".format(2, len(result))
 
         result = list(session.execute("SELECT * FROM users_by_state WHERE state='CA';"))
-        self.assertEqual(len(result), 1, "Expecting {} users, got {}".format(1, len(result)))
+        assert len(result) == 1, "Expecting {} users, got {}".format(1, len(result))
 
         result = list(session.execute("SELECT * FROM users_by_state WHERE state='MA';"))
-        self.assertEqual(len(result), 0, "Expecting {} users, got {}".format(0, len(result)))
+        assert len(result) == 0, "Expecting {} users, got {}".format(0, len(result))
 
-    def populate_mv_after_insert_test(self):
+    def test_populate_mv_after_insert(self):
         """Test that a view is OK when created with existing data"""
-
         session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)
 
         session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
 
-        for i in xrange(1000):
+        for i in range(1000):
             session.execute("INSERT INTO t (id, v) VALUES ({v}, {v})".format(v=i))
 
         session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
                          "AND id IS NOT NULL PRIMARY KEY (v, id)"))
 
-        debug("wait for view to build")
+        logger.debug("wait for view to build")
         self._wait_for_view("ks", "t_by_v")
 
-        debug("wait that all batchlogs are replayed")
+        logger.debug("wait that all batchlogs are replayed")
         self._replay_batchlogs()
 
-        for i in xrange(1000):
+        for i in range(1000):
             assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(i), [i, i])
 
-    def populate_mv_after_insert_wide_rows_test(self):
+    def test_populate_mv_after_insert_wide_rows(self):
         """Test that a view is OK when created with existing data with wide rows"""
-
         session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)
 
         session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))")
 
-        for i in xrange(5):
-            for j in xrange(10000):
+        for i in range(5):
+            for j in range(10000):
                 session.execute("INSERT INTO t (id, v) VALUES ({}, {})".format(i, j))
 
         session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
                          "AND id IS NOT NULL PRIMARY KEY (v, id)"))
 
-        debug("wait for view to build")
+        logger.debug("wait for view to build")
         self._wait_for_view("ks", "t_by_v")
 
-        debug("wait that all batchlogs are replayed")
+        logger.debug("wait that all batchlogs are replayed")
         self._replay_batchlogs()
-        for i in xrange(5):
-            for j in xrange(10000):
+        for i in range(5):
+            for j in range(10000):
                 assert_one(session, "SELECT * FROM t_by_v WHERE id = {} AND v = {}".format(i, j), [j, i])
 
-    def crc_check_chance_test(self):
+    def test_crc_check_chance(self):
         """Test that crc_check_chance parameter is properly populated after mv creation and update"""
-
         session = self.prepare()
 
         session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
@@ -290,9 +290,8 @@ class TestMaterializedViews(Tester):
 
         assert_crc_check_chance_equal(session, "t_by_v", 0.3, view=True)
 
-    def prepared_statement_test(self):
+    def test_prepared_statement(self):
         """Test basic insertions with prepared statement"""
-
         session = self.prepare(user_table=True)
 
         insertPrepared = session.prepare(
@@ -309,20 +308,19 @@ class TestMaterializedViews(Tester):
         session.execute(insertPrepared.bind(('user4', 'ch@ngem3d', 'm', 'TX', 1974)))
 
         result = list(session.execute("SELECT * FROM users;"))
-        self.assertEqual(len(result), 4, "Expecting {} users, got {}".format(4, len(result)))
+        assert len(result) == 4, "Expecting {} users, got {}".format(4, len(result))
 
         result = list(session.execute(selectPrepared.bind(['TX'])))
-        self.assertEqual(len(result), 2, "Expecting {} users, got {}".format(2, len(result)))
+        assert len(result) == 2, "Expecting {} users, got {}".format(2, len(result))
 
         result = list(session.execute(selectPrepared.bind(['CA'])))
-        self.assertEqual(len(result), 1, "Expecting {} users, got {}".format(1, len(result)))
+        assert len(result) == 1, "Expecting {} users, got {}".format(1, len(result))
 
         result = list(session.execute(selectPrepared.bind(['MA'])))
-        self.assertEqual(len(result), 0, "Expecting {} users, got {}".format(0, len(result)))
+        assert len(result) == 0, "Expecting {} users, got {}".format(0, len(result))
 
-    def immutable_test(self):
+    def test_immutable(self):
         """Test that a materialized view is immutable"""
-
         session = self.prepare(user_table=True)
 
         # cannot insert
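As a standalone illustration of the prepare/bind idiom used in test_prepared_statement
above: session.prepare compiles the CQL once, and bind produces an executable statement per
parameter set. Column values here are made up:

    insert = session.prepare(
        "INSERT INTO users (username, password, state) VALUES (?, ?, ?)")
    select = session.prepare("SELECT * FROM users_by_state WHERE state = ?")

    session.execute(insert.bind(('user5', 's3cret', 'TX')))
    rows = list(session.execute(select.bind(['TX'])))
    assert any(row.username == 'user5' for row in rows)
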
@@ -345,9 +343,8 @@ class TestMaterializedViews(Tester):
         assert_invalid(session, "ALTER TABLE users_by_state ADD first_name varchar",
                        "Cannot use ALTER TABLE on Materialized View")
 
-    def drop_mv_test(self):
+    def test_drop_mv(self):
         """Test that we can drop a view properly"""
-
         session = self.prepare(user_table=True)
 
         # create another materialized view
@@ -357,22 +354,21 @@ class TestMaterializedViews(Tester):
 
         result = list(session.execute(("SELECT * FROM system_schema.views "
                                        "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
-        self.assertEqual(len(result), 2, "Expecting {} materialized view, got {}".format(2, len(result)))
+        assert len(result) == 2, "Expecting {} materialized view, got {}".format(2, len(result))
 
         session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
 
         result = list(session.execute(("SELECT * FROM system_schema.views "
                                        "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
-        self.assertEqual(len(result), 1, "Expecting {} materialized view, got {}".format(1, len(result)))
+        assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))
 
-    def drop_column_test(self):
+    def test_drop_column(self):
         """Test that we cannot drop a column if it is used by a MV"""
-
         session = self.prepare(user_table=True)
 
         result = list(session.execute(("SELECT * FROM system_schema.views "
                                        "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
-        self.assertEqual(len(result), 1, "Expecting {} materialized view, got {}".format(1, len(result)))
+        assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))
 
         assert_invalid(
             session,
@@ -380,17 +376,13 @@ class TestMaterializedViews(Tester):
             "Cannot drop column state on base table with materialized views."
         )
 
-    def drop_table_test(self):
+    def test_drop_table(self):
         """Test that we cannot drop a table without deleting its MVs first"""
-
         session = self.prepare(user_table=True)
 
         result = list(session.execute(("SELECT * FROM system_schema.views "
                                        "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
-        self.assertEqual(
-            len(result), 1,
-            "Expecting {} materialized view, got {}".format(1, len(result))
-        )
+        assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))
 
         assert_invalid(
             session,
@@ -400,24 +392,17 @@ class TestMaterializedViews(Tester):
 
         result = list(session.execute(("SELECT * FROM system_schema.views "
                                        "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
-        self.assertEqual(
-            len(result), 1,
-            "Expecting {} materialized view, got {}".format(1, len(result))
-        )
+        assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))
 
         session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
         session.execute("DROP TABLE ks.users;")
 
         result = list(session.execute(("SELECT * FROM system_schema.views "
                                        "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
-        self.assertEqual(
-            len(result), 0,
-            "Expecting {} materialized view, got {}".format(1, len(result))
-        )
+        assert len(result) == 0, "Expecting {} materialized view, got {}".format(1, len(result))
 
-    def clustering_column_test(self):
+    def test_clustering_column(self):
         """Test that we can use clustering columns as primary key for a materialized view"""
-
         session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)
 
         session.execute(("CREATE TABLE users (username varchar, password varchar, gender varchar, "
@@ -434,10 +419,10 @@ class TestMaterializedViews(Tester):
         self._insert_data(session)
 
         result = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX'"))
-        self.assertEqual(len(result), 2, "Expecting {} users, got {}".format(2, len(result)))
+        assert len(result) == 2, "Expecting {} users, got {}".format(2, len(result))
 
         result = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX' AND birth_year=1968"))
-        self.assertEqual(len(result), 1, "Expecting {} users, got {}".format(1, len(result)))
+        assert len(result) == 1, "Expecting {} users, got {}".format(1, len(result))
 
     def _add_dc_after_mv_test(self, rf):
         """
@@ -448,47 +433,47 @@ class TestMaterializedViews(Tester):
 
         session = self.prepare(rf=rf)
 
-        debug("Creating schema")
+        logger.debug("Creating schema")
         session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
         session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                          "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
 
-        debug("Writing 1k to base")
-        for i in xrange(1000):
+        logger.debug("Writing 1k to base")
+        for i in range(1000):
             session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
 
-        debug("Reading 1k from view")
-        for i in xrange(1000):
+        logger.debug("Reading 1k from view")
+        for i in range(1000):
             assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
 
-        debug("Reading 1k from base")
-        for i in xrange(1000):
+        logger.debug("Reading 1k from base")
+        for i in range(1000):
             assert_one(session, "SELECT * FROM t WHERE id = {}".format(i), [i, -i])
 
-        debug("Bootstrapping new node in another dc")
+        logger.debug("Bootstrapping new node in another dc")
         node4 = new_node(self.cluster, data_center='dc2')
         node4.start(wait_other_notice=True, wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
 
-        debug("Bootstrapping new node in another dc")
+        logger.debug("Bootstrapping new node in another dc")
         node5 = new_node(self.cluster, remote_debug_port='1414', data_center='dc2')
         node5.start(jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
 
         session2 = self.patient_exclusive_cql_connection(node4)
 
-        debug("Verifying data from new node in view")
-        for i in xrange(1000):
+        logger.debug("Verifying data from new node in view")
+        for i in range(1000):
             assert_one(session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(-i), [-i, i])
 
-        debug("Inserting 100 into base")
-        for i in xrange(1000, 1100):
+        logger.debug("Inserting 100 into base")
+        for i in range(1000, 1100):
             session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
 
-        debug("Verify 100 in view")
-        for i in xrange(1000, 1100):
+        logger.debug("Verify 100 in view")
+        for i in range(1000, 1100):
             assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
 
-    @attr('resource-intensive')
-    def add_dc_after_mv_simple_replication_test(self):
+    @pytest.mark.resource_intensive
+    def test_add_dc_after_mv_simple_replication(self):
         """
         @jira_ticket CASSANDRA-10634
 
@@ -497,8 +482,8 @@ class TestMaterializedViews(Tester):
 
         self._add_dc_after_mv_test(1)
 
-    @attr('resource-intensive')
-    def add_dc_after_mv_network_replication_test(self):
+    @pytest.mark.resource_intensive
+    def test_add_dc_after_mv_network_replication(self):
         """
         @jira_ticket CASSANDRA-10634
 
@@ -507,8 +492,8 @@ class TestMaterializedViews(Tester):
 
         self._add_dc_after_mv_test({'dc1': 1, 'dc2': 1})
 
-    @attr('resource-intensive')
-    def add_node_after_mv_test(self):
+    @pytest.mark.resource_intensive
+    def test_add_node_after_mv(self):
         """
         @jira_ticket CASSANDRA-10978
 
@@ -521,10 +506,10 @@ class TestMaterializedViews(Tester):
         session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                          "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
 
-        for i in xrange(1000):
+        for i in range(1000):
             session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
 
-        for i in xrange(1000):
+        for i in range(1000):
             assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
 
         node4 = new_node(self.cluster)
@@ -539,17 +524,17 @@ class TestMaterializedViews(Tester):
         """
         assert_one(session2, "SELECT count(*) FROM system.built_views WHERE keyspace_name = 'ks' AND view_name = 't_by_v'", [1])
 
-        for i in xrange(1000):
+        for i in range(1000):
             assert_one(session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(-i), [-i, i])
 
-        for i in xrange(1000, 1100):
+        for i in range(1000, 1100):
             session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
 
-        for i in xrange(1000, 1100):
+        for i in range(1000, 1100):
             assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
 
-    @attr('resource-intensive')
-    def add_node_after_wide_mv_with_range_deletions_test(self):
+    @pytest.mark.resource_intensive
+    def test_add_node_after_wide_mv_with_range_deletions(self):
         """
         @jira_ticket CASSANDRA-11670
 
@@ -562,26 +547,26 @@ class TestMaterializedViews(Tester):
         session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                          "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
 
-        for i in xrange(10):
-            for j in xrange(100):
+        for i in range(10):
+            for j in range(100):
                 session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=j))
 
         self.cluster.flush()
 
-        for i in xrange(10):
-            for j in xrange(100):
+        for i in range(10):
+            for j in range(100):
                 assert_one(session, "SELECT * FROM t WHERE id = {} and v = {}".format(i, j), [i, j])
                 assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
 
-        for i in xrange(10):
-            for j in xrange(100):
+        for i in range(10):
+            for j in range(100):
                 if j % 10 == 0:
                     session.execute("DELETE FROM t WHERE id = {} AND v >= {} and v < {}".format(i, j, j + 2))
 
         self.cluster.flush()
 
-        for i in xrange(10):
-            for j in xrange(100):
+        for i in range(10):
+            for j in range(100):
                 if j % 10 == 0 or (j - 1) % 10 == 0:
                     assert_none(session, "SELECT * FROM t WHERE id = {} and v = {}".format(i, j))
                     assert_none(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j))
@@ -591,13 +576,13 @@ class TestMaterializedViews(Tester):
 
         node4 = new_node(self.cluster)
         node4.set_configuration_options(values={'max_mutation_size_in_kb': 20})  # CASSANDRA-11670
-        debug("Start join at {}".format(time.strftime("%H:%M:%S")))
+        logger.debug("Start join at {}".format(time.strftime("%H:%M:%S")))
         node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
 
         session2 = self.patient_exclusive_cql_connection(node4)
 
-        for i in xrange(10):
-            for j in xrange(100):
+        for i in range(10):
+            for j in range(100):
                 if j % 10 == 0 or (j - 1) % 10 == 0:
                     assert_none(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(i, j))
                     assert_none(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j))
@@ -605,12 +590,12 @@ class TestMaterializedViews(Tester):
                     assert_one(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(i, j), [i, j])
                     assert_one(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
 
-        for i in xrange(10):
-            for j in xrange(100, 110):
+        for i in range(10):
+            for j in range(100, 110):
                 session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=j))
 
-        for i in xrange(10):
-            for j in xrange(110):
+        for i in range(10):
+            for j in range(110):
                 if j < 100 and (j % 10 == 0 or (j - 1) % 10 == 0):
                     assert_none(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(i, j))
                     assert_none(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j))
@@ -618,8 +603,8 @@ class TestMaterializedViews(Tester):
                     assert_one(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(i, j), [i, j])
                     assert_one(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
 
-    @attr('resource-intensive')
-    def add_node_after_very_wide_mv_test(self):
+    @pytest.mark.resource_intensive
+    def test_add_node_after_very_wide_mv(self):
         """
         @jira_ticket CASSANDRA-11670
 
@@ -632,37 +617,37 @@ class TestMaterializedViews(Tester):
         session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                          "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
 
-        for i in xrange(5):
-            for j in xrange(5000):
+        for i in range(5):
+            for j in range(5000):
                 session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=j))
 
         self.cluster.flush()
 
-        for i in xrange(5):
-            for j in xrange(5000):
+        for i in range(5):
+            for j in range(5000):
                 assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
 
         node4 = new_node(self.cluster)
         node4.set_configuration_options(values={'max_mutation_size_in_kb': 20})  # CASSANDRA-11670
-        debug("Start join at {}".format(time.strftime("%H:%M:%S")))
+        logger.debug("Start join at {}".format(time.strftime("%H:%M:%S")))
         node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
 
         session2 = self.patient_exclusive_cql_connection(node4)
 
-        for i in xrange(5):
-            for j in xrange(5000):
+        for i in range(5):
+            for j in range(5000):
                 assert_one(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
 
-        for i in xrange(5):
-            for j in xrange(5100):
+        for i in range(5):
+            for j in range(5100):
                 session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=j))
 
-        for i in xrange(5):
-            for j in xrange(5100):
+        for i in range(5):
+            for j in range(5100):
                 assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
 
-    @attr('resource-intensive')
-    def add_write_survey_node_after_mv_test(self):
+    @pytest.mark.resource_intensive
+    def test_add_write_survey_node_after_mv(self):
         """
         @jira_ticket CASSANDRA-10621
         @jira_ticket CASSANDRA-10978
@@ -676,24 +661,23 @@ class TestMaterializedViews(Tester):
         session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                          "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
 
-        for i in xrange(1000):
+        for i in range(1000):
             session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
 
-        for i in xrange(1000):
+        for i in range(1000):
             assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
 
         node4 = new_node(self.cluster)
         node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.write_survey=true", "-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
 
-        for i in xrange(1000, 1100):
+        for i in range(1000, 1100):
             session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
 
-        for i in xrange(1100):
+        for i in range(1100):
             assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
 
-    def allow_filtering_test(self):
+    def test_allow_filtering(self):
         """Test that allow filtering works as usual for a materialized view"""
-
         session = self.prepare()
 
         session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
@@ -702,19 +686,19 @@ class TestMaterializedViews(Tester):
         session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t "
                          "WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)"))
 
-        for i in xrange(1000):
+        for i in range(1000):
             session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
 
-        for i in xrange(1000):
+        for i in range(1000):
             assert_one(session, "SELECT * FROM t_by_v WHERE v = {v}".format(v=i), [i, i, 'a', 3.0])
 
         rows = list(session.execute("SELECT * FROM t_by_v2 WHERE v2 = 'a'"))
-        self.assertEqual(len(rows), 1000, "Expected 1000 rows but got {}".format(len(rows)))
+        assert len(rows) == 1000, "Expected 1000 rows but got {}".format(len(rows))
 
         assert_invalid(session, "SELECT * FROM t_by_v WHERE v = 1 AND v2 = 'a'")
         assert_invalid(session, "SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = 1")
 
-        for i in xrange(1000):
+        for i in range(1000):
             assert_one(
                 session,
                 "SELECT * FROM t_by_v WHERE v = {} AND v3 = 3.0 ALLOW FILTERING".format(i),
@@ -726,9 +710,8 @@ class TestMaterializedViews(Tester):
                 ['a', i, i, 3.0]
             )
 
-    def secondary_index_test(self):
+    def test_secondary_index(self):
         """Test that secondary indexes cannot be created on a materialized view"""
-
         session = self.prepare()
 
         session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
@@ -737,34 +720,32 @@ class TestMaterializedViews(Tester):
         assert_invalid(session, "CREATE INDEX ON t_by_v (v2)",
                        "Secondary indexes are not supported on materialized views")
 
-    def ttl_test(self):
+    def test_ttl(self):
         """
         Test that TTL works as expected for a materialized view
         @expected_result The TTL is propagated properly between tables.
         """
-
         session = self.prepare()
         session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 int, v3 int)")
         session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t "
                          "WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)"))
 
-        for i in xrange(100):
+        for i in range(100):
             session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, {v}, {v}) USING TTL 10".format(v=i))
 
-        for i in xrange(100):
+        for i in range(100):
             assert_one(session, "SELECT * FROM t_by_v2 WHERE v2 = {}".format(i), [i, i, i, i])
 
         time.sleep(20)
 
         rows = list(session.execute("SELECT * FROM t_by_v2"))
-        self.assertEqual(len(rows), 0, "Expected 0 rows but got {}".format(len(rows)))
+        assert len(rows) == 0, "Expected 0 rows but got {}".format(len(rows))
 
-    def query_all_new_column_test(self):
+    def test_query_all_new_column(self):
         """
         Test that a materialized view created with a 'SELECT *' works as expected when adding a new column
         @expected_result The new column is present in the view.
         """
-
         session = self.prepare(user_table=True)
 
         self._insert_data(session)
@@ -778,20 +759,19 @@ class TestMaterializedViews(Tester):
         session.execute("ALTER TABLE users ADD first_name varchar;")
 
         results = list(session.execute("SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'"))
-        self.assertEqual(len(results), 1)
-        self.assertTrue(hasattr(results[0], 'first_name'), 'Column "first_name" not found')
+        assert len(results) == 1
+        assert hasattr(results[0], 'first_name'), 'Column "first_name" not found'
         assert_one(
             session,
             "SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
             ['TX', 'user1', 1968, None, 'f', 'ch@ngem3a', None]
         )
 
-    def query_new_column_test(self):
+    def test_query_new_column(self):
         """
         Test that a materialized view created with 'SELECT <col1, ...>' works as expected when adding a new column
         @expected_result The new column is not present in the view.
         """
-
         session = self.prepare(user_table=True)
 
         session.execute(("CREATE MATERIALIZED VIEW users_by_state2 AS SELECT username FROM users "
@@ -808,20 +788,19 @@ class TestMaterializedViews(Tester):
         session.execute("ALTER TABLE users ADD first_name varchar;")
 
         results = list(session.execute("SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'"))
-        self.assertEqual(len(results), 1)
-        self.assertFalse(hasattr(results[0], 'first_name'), 'Column "first_name" found in view')
+        assert len(results) == 1
+        assert not hasattr(results[0], 'first_name'), 'Column "first_name" found in view'
         assert_one(
             session,
             "SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'",
             ['TX', 'user1']
         )
 
-    def rename_column_test(self):
+    def test_rename_column(self):
         """
         Test that a materialized view created with a 'SELECT *' works as expected when renaming a column
         @expected_result The column is also renamed in the view.
         """
-
         session = self.prepare(user_table=True)
 
         self._insert_data(session)
@@ -835,20 +814,19 @@ class TestMaterializedViews(Tester):
         session.execute("ALTER TABLE users RENAME username TO user")
 
         results = list(session.execute("SELECT * FROM users_by_state WHERE state = 'TX' AND user = 'user1'"))
-        self.assertEqual(len(results), 1)
-        self.assertTrue(hasattr(results[0], 'user'), 'Column "user" not found')
+        assert len(results) == 1
+        assert hasattr(results[0], 'user'), 'Column "user" not found'
         assert_one(
             session,
             "SELECT state, user, birth_year, gender FROM users_by_state WHERE state = 'TX' AND user = 'user1'",
             ['TX', 'user1', 1968, 'f']
         )
 
-    def rename_column_atomicity_test(self):
+    def test_rename_column_atomicity(self):
         """
         Test that column renaming is atomically done between a table and its materialized views
         @jira_ticket CASSANDRA-12952
         """
-
         session = self.prepare(nodes=1, user_table=True, install_byteman=True)
         node = self.cluster.nodelist()[0]
 
@@ -861,13 +839,13 @@ class TestMaterializedViews(Tester):
         )
 
         # Rename a column with an injected byteman rule to kill the node after the first schema update
-        self.allow_log_errors = True
+        self.fixture_dtest_setup.allow_log_errors = True
         script_version = '4x' if self.cluster.version() >= '4' else '3x'
         node.byteman_submit(['./byteman/merge_schema_failure_{}.btm'.format(script_version)])
-        with self.assertRaises(NoHostAvailable):
+        with pytest.raises(NoHostAvailable):
             session.execute("ALTER TABLE users RENAME username TO user")
 
-        debug('Restarting node')
+        logger.debug('Restarting node')
         node.stop()
         node.start(wait_for_binary_proto=True)
         session = self.patient_cql_connection(node, consistency_level=ConsistencyLevel.ONE)
@@ -884,58 +862,57 @@ class TestMaterializedViews(Tester):
             ['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
         )
 
-    def lwt_test(self):
+    def test_lwt(self):
         """Test that lightweight transaction behave properly with a materialized view"""
-
         session = self.prepare()
 
         session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
         session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                          "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
 
-        debug("Inserting initial data using IF NOT EXISTS")
-        for i in xrange(1000):
+        logger.debug("Inserting initial data using IF NOT EXISTS")
+        for i in range(1000):
             session.execute(
                 "INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i)
             )
         self._replay_batchlogs()
 
-        debug("All rows should have been inserted")
-        for i in xrange(1000):
+        logger.debug("All rows should have been inserted")
+        for i in range(1000):
             assert_one(
                 session,
                 "SELECT * FROM t_by_v WHERE v = {}".format(i),
                 [i, i, 'a', 3.0]
             )
 
-        debug("Tyring to UpInsert data with a different value using IF NOT EXISTS")
-        for i in xrange(1000):
+        logger.debug("Tyring to UpInsert data with a different value using IF NOT EXISTS")
+        for i in range(1000):
             v = i * 2
             session.execute(
                 "INSERT INTO t (id, v, v2, v3) VALUES ({id}, {v}, 'a', 3.0) IF NOT EXISTS".format(id=i, v=v)
             )
         self._replay_batchlogs()
 
-        debug("No rows should have changed")
-        for i in xrange(1000):
+        logger.debug("No rows should have changed")
+        for i in range(1000):
             assert_one(
                 session,
                 "SELECT * FROM t_by_v WHERE v = {}".format(i),
                 [i, i, 'a', 3.0]
             )
 
-        debug("Update the 10 first rows with a different value")
-        for i in xrange(1000):
+        logger.debug("Update the 10 first rows with a different value")
+        for i in range(1000):
             v = i + 2000
             session.execute(
                 "UPDATE t SET v={v} WHERE id = {id} IF v < 10".format(id=i, v=v)
             )
         self._replay_batchlogs()
 
-        debug("Verify that only the 10 first rows changed.")
+        logger.debug("Verify that only the 10 first rows changed.")
         results = list(session.execute("SELECT * FROM t_by_v;"))
-        self.assertEqual(len(results), 1000)
-        for i in xrange(1000):
+        assert len(results) == 1000
+        for i in range(1000):
             v = i + 2000 if i < 10 else i
             assert_one(
                 session,
@@ -943,18 +920,18 @@ class TestMaterializedViews(Tester):
                 [v, i, 'a', 3.0]
             )
 
-        debug("Deleting the first 10 rows")
-        for i in xrange(1000):
+        logger.debug("Deleting the first 10 rows")
+        for i in range(1000):
             v = i + 2000
             session.execute(
                 "DELETE FROM t WHERE id = {id} IF v = {v} ".format(id=i, v=v)
             )
         self._replay_batchlogs()
 
-        debug("Verify that only the 10 first rows have been deleted.")
+        logger.debug("Verify that only the 10 first rows have been deleted.")
         results = list(session.execute("SELECT * FROM t_by_v;"))
-        self.assertEqual(len(results), 990)
-        for i in xrange(10, 1000):
+        assert len(results) == 990
+        for i in range(10, 1000):
             assert_one(
                 session,
                 "SELECT * FROM t_by_v WHERE v = {}".format(i),
@@ -971,7 +948,7 @@ class TestMaterializedViews(Tester):
         session = self.prepare(options=options, install_byteman=True)
         node1, node2, node3 = self.cluster.nodelist()
 
-        debug("Avoid premature MV build finalization with byteman")
+        logger.debug("Avoid premature MV build finalization with byteman")
         for node in self.cluster.nodelist():
             if self.cluster.version() >= '4':
                 node.byteman_submit(['./byteman/4.0/skip_view_build_finalization.btm'])
@@ -982,42 +959,42 @@ class TestMaterializedViews(Tester):
 
         session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
 
-        debug("Inserting initial data")
-        for i in xrange(10000):
+        logger.debug("Inserting initial data")
+        for i in range(10000):
             session.execute(
                 "INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i)
             )
 
-        debug("Create a MV")
+        logger.debug("Create a MV")
         session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                          "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
 
-        debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
+        logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
         self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)
 
-        debug("Stop the cluster. Interrupt the MV build process.")
+        logger.debug("Stop the cluster. Interrupt the MV build process.")
         self.cluster.stop()
 
-        debug("Checking logs to verify that the view build tasks have been created")
+        logger.debug("Checking logs to verify that the view build tasks have been created")
         for node in self.cluster.nodelist():
-            self.assertTrue(node.grep_log('Starting new view build', filename='debug.log'))
-            self.assertFalse(node.grep_log('Resuming view build', filename='debug.log'))
+            assert node.grep_log('Starting new view build', filename='debug.log')
+            assert not node.grep_log('Resuming view build', filename='debug.log')
             node.mark_log(filename='debug.log')
 
-        debug("Restart the cluster")
+        logger.debug("Restart the cluster")
         self.cluster.start(wait_for_binary_proto=True)
         session = self.patient_cql_connection(node1)
         session.execute("USE ks")
 
-        debug("MV shouldn't be built yet.")
-        self.assertNotEqual(len(list(session.execute("SELECT COUNT(*) FROM t_by_v"))), 10000)
+        logger.debug("MV shouldn't be built yet.")
+        assert list(session.execute("SELECT COUNT(*) FROM t_by_v"))[0].count != 10000
 
-        debug("Wait and ensure the MV build resumed. Waiting up to 2 minutes.")
+        logger.debug("Wait and ensure the MV build resumed. Waiting up to 2 minutes.")
         self._wait_for_view("ks", "t_by_v")
 
-        debug("Verify all data")
+        logger.debug("Verify all data")
         assert_one(session, "SELECT COUNT(*) FROM t_by_v", [10000])
-        for i in xrange(10000):
+        for i in range(10000):
             assert_one(
                 session,
                 "SELECT * FROM t_by_v WHERE v = {}".format(i),
@@ -1025,10 +1002,11 @@ class TestMaterializedViews(Tester):
                 cl=ConsistencyLevel.ALL
             )
 
-        debug("Checking logs to verify that some view build tasks have been resumed")
+        logger.debug("Checking logs to verify that some view build tasks have been resumed")
         for node in self.cluster.nodelist():
-            self.assertTrue(node.grep_log('Resuming view build', filename='debug.log'))
+            assert node.grep_log('Resuming view build', filename='debug.log')
 
+    @pytest.mark.skip(reason="Frequently fails in CI. Skipping until fixed as tracked by CASSANDRA-14148")
     @since('4.0')
     def test_drop_while_building(self):
         """Test that a parallel MV build is interrupted when the view is removed"""
@@ -1036,31 +1014,31 @@ class TestMaterializedViews(Tester):
         session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)
         session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
 
-        debug("Inserting initial data")
-        for i in xrange(5000):
+        logger.debug("Inserting initial data")
+        for i in range(5000):
             session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i))
 
-        debug("Slowing down MV build with byteman")
+        logger.debug("Slowing down MV build with byteman")
         for node in self.cluster.nodelist():
             node.byteman_submit(['./byteman/4.0/view_builder_task_sleep.btm'])
 
-        debug("Create a MV")
+        logger.debug("Create a MV")
         session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                          "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
 
-        debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
+        logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
         self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)
 
-        debug("Drop the MV while it is still building")
+        logger.debug("Drop the MV while it is still building")
         session.execute("DROP MATERIALIZED VIEW t_by_v")
 
-        debug("Verify that the build has been stopped before its finalization without errors")
+        logger.debug("Verify that the build has been stopped before its finalization without errors")
         for node in self.cluster.nodelist():
             self.check_logs_for_errors()
-            self.assertFalse(node.grep_log('Marking view', filename='debug.log'))
-            self.assertTrue(node.grep_log('Stopping current view builder due to schema change', filename='debug.log'))
+            assert not node.grep_log('Marking view', filename='debug.log')
+            assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')
 
-        debug("Verify that the view has been removed")
+        logger.debug("Verify that the view has been removed")
         failed = False
         try:
             session.execute("SELECT COUNT(*) FROM t_by_v")
@@ -1068,11 +1046,11 @@ class TestMaterializedViews(Tester):
             failed = True
-        self.assertTrue(failed, "The view shouldn't be queryable")
+        assert failed, "The view shouldn't be queryable"
 
-        debug("Create the MV again")
+        logger.debug("Create the MV again")
         session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                          "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
 
-        debug("Verify that the MV has been successfully created")
+        logger.debug("Verify that the MV has been successfully created")
         self._wait_for_view('ks', 't_by_v')
         assert_one(session, "SELECT COUNT(*) FROM t_by_v", [5000])
 
@@ -1084,54 +1062,54 @@ class TestMaterializedViews(Tester):
         session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
         nodes = self.cluster.nodelist()
 
-        debug("Inserting initial data")
-        for i in xrange(5000):
+        logger.debug("Inserting initial data")
+        for i in range(5000):
             session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i))
 
-        debug("Slowing down MV build with byteman")
+        logger.debug("Slowing down MV build with byteman")
         for node in nodes:
             node.byteman_submit(['./byteman/4.0/view_builder_task_sleep.btm'])
 
-        debug("Create a MV")
+        logger.debug("Create a MV")
         session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                          "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
 
-        debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
+        logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
         self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)
 
-        debug("Stopping all running view build tasks with nodetool")
+        logger.debug("Stopping all running view build tasks with nodetool")
         for node in nodes:
-            node.watch_log_for('Starting new view build for range', filename='debug.log', timeout=60)
+            node.watch_log_for('Starting new view build for range', filename='debug.log', timeout=120)
             node.nodetool('stop VIEW_BUILD')
 
-        debug("Checking logs to verify that some view build tasks have been stopped")
+        logger.debug("Checking logs to verify that some view build tasks have been stopped")
         for node in nodes:
-            node.watch_log_for('Stopped build for view', filename='debug.log', timeout=60)
-            node.watch_log_for('Compaction interrupted: View build', filename='system.log', timeout=60)
+            node.watch_log_for('Stopped build for view', filename='debug.log', timeout=120)
+            node.watch_log_for('Compaction interrupted: View build', filename='system.log', timeout=120)
             self.check_logs_for_errors()
 
-        debug("Drop the MV while it is still building")
+        logger.debug("Drop the MV while it is still building")
         session.execute("DROP MATERIALIZED VIEW t_by_v")
 
-        debug("Verify that the build has been stopped before its finalization without errors")
+        logger.debug("Verify that the build has been stopped before its finalization without errors")
         for node in nodes:
             self.check_logs_for_errors()
-            self.assertFalse(node.grep_log('Marking view', filename='debug.log'))
-            self.assertTrue(node.grep_log('Stopping current view builder due to schema change', filename='debug.log'))
+            assert not node.grep_log('Marking view', filename='debug.log')
+            assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')
 
-        debug("Verify that the view has been removed")
+        logger.debug("Verify that the view has been removed")
         failed = False
         try:
             session.execute("SELECT COUNT(*) FROM t_by_v")
         except InvalidRequest:
             failed = True
-        self.assertTrue(failed, "The view shouldn't be queryable")
+        assert failed, "The view shouldn't be queryable"
 
-        debug("Create the MV again")
+        logger.debug("Create the MV again")
         session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                          "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
 
-        debug("Verify that the MV has been successfully created")
+        logger.debug("Verify that the MV has been successfully created")
         self._wait_for_view('ks', 't_by_v')
         assert_one(session, "SELECT COUNT(*) FROM t_by_v", [5000])
 
@@ -1143,51 +1121,51 @@ class TestMaterializedViews(Tester):
         session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
         nodes = self.cluster.nodelist()
 
-        debug("Inserting initial data")
-        for i in xrange(5000):
+        logger.debug("Inserting initial data")
+        for i in range(5000):
             session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i))
 
-        debug("Slowing down MV build with byteman")
+        logger.debug("Slowing down MV build with byteman")
         for node in nodes:
             node.byteman_submit(['./byteman/4.0/view_builder_task_sleep.btm'])
 
-        debug("Create a MV")
+        logger.debug("Create a MV")
         session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                          "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
 
-        debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
+        logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
         self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)
 
-        debug("Stopping all running view build tasks with nodetool")
+        logger.debug("Stopping all running view build tasks with nodetool")
         for node in nodes:
-            node.watch_log_for('Starting new view build for range', filename='debug.log', timeout=60)
+            node.watch_log_for('Starting new view build for range', filename='debug.log', timeout=120)
             node.nodetool('stop VIEW_BUILD')
 
-        debug("Checking logs to verify that some view build tasks have been stopped")
+        logger.debug("Checking logs to verify that some view build tasks have been stopped")
         for node in nodes:
-            node.watch_log_for('Stopped build for view', filename='debug.log', timeout=60)
-            node.watch_log_for('Compaction interrupted: View build', filename='system.log', timeout=60)
-            node.watch_log_for('Interrupted build for view', filename='debug.log', timeout=60)
-            self.assertFalse(node.grep_log('Marking view', filename='debug.log'))
+            node.watch_log_for('Stopped build for view', filename='debug.log', timeout=120)
+            node.watch_log_for('Compaction interrupted: View build', filename='system.log', timeout=120)
+            node.watch_log_for('Interrupted build for view', filename='debug.log', timeout=120)
+            assert not node.grep_log('Marking view', filename='debug.log')
             self.check_logs_for_errors()
 
-        debug("Check that MV shouldn't be built yet.")
-        self.assertNotEqual(len(list(session.execute("SELECT COUNT(*) FROM t_by_v"))), 5000)
+        logger.debug("Check that MV shouldn't be built yet.")
+        assert len(list(session.execute("SELECT COUNT(*) FROM t_by_v"))) != 5000
 
-        debug("Restart the cluster")
+        logger.debug("Restart the cluster")
         self.cluster.stop()
         marks = [node.mark_log() for node in nodes]
         self.cluster.start(wait_for_binary_proto=True)
         session = self.patient_cql_connection(nodes[0])
 
-        debug("Verify that the MV has been successfully created")
+        logger.debug("Verify that the MV has been successfully created")
         self._wait_for_view('ks', 't_by_v')
         assert_one(session, "SELECT COUNT(*) FROM ks.t_by_v", [5000])
 
-        debug("Checking logs to verify that the view build has been resumed and completed after restart")
+        logger.debug("Checking logs to verify that the view build has been resumed and completed after restart")
         for node, mark in zip(nodes, marks):
-            self.assertTrue(node.grep_log('Resuming view build', filename='debug.log', from_mark=mark))
-            self.assertTrue(node.grep_log('Marking view', filename='debug.log', from_mark=mark))
+            assert node.grep_log('Resuming view build', filename='debug.log', from_mark=mark)
+            assert node.grep_log('Marking view', filename='debug.log', from_mark=mark)
             self.check_logs_for_errors()
 
     @since('3.0')
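The restart verification above leans on ccm's log bookmarks: mark_log records the current
end of a node's log file, and grep_log with from_mark searches only what was written
afterwards. In isolation the idiom looks like this, where node is any ccm node object:

    mark = node.mark_log(filename='debug.log')
    node.stop()
    node.start(wait_for_binary_proto=True)
    # matches only lines logged after the restart
    assert node.grep_log('Resuming view build', filename='debug.log', from_mark=mark)
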
@@ -1207,7 +1185,7 @@ class TestMaterializedViews(Tester):
         node1, node2, node3 = self.cluster.nodelist()
         session.execute('USE ks')
 
-        debug("MV with same key and unselected columns")
+        logger.debug("MV with same key and unselected columns")
         session.execute("CREATE TABLE t2 (k int, a int, b int, c int, primary key(k, a)) with default_time_to_live=600")
         session.execute(("CREATE MATERIALIZED VIEW mv2 AS SELECT k,a,b FROM t2 "
                          "WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (a, k)"))
@@ -1250,7 +1228,7 @@ class TestMaterializedViews(Tester):
             assert_none(session, "SELECT * FROM t2")
             assert_none(session, "SELECT * FROM mv2")
 
-        debug("MV with extra key")
+        logger.debug("MV with extra key")
         session.execute("CREATE TABLE t (k int PRIMARY KEY, a int, b int) with default_time_to_live=600")
         session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM t "
                          "WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)"))
@@ -1291,10 +1269,12 @@ class TestMaterializedViews(Tester):
             assert_one(session, "SELECT * FROM t", [1, 6, 1])
             assert_one(session, "SELECT * FROM mv", [1, 6, 1])
 
+    @flaky
     @since('3.0')
     def test_no_base_column_in_view_pk_complex_timestamp_with_flush(self):
         self._test_no_base_column_in_view_pk_complex_timestamp(flush=True)
 
+    @pytest.mark.skip(reason="Frequently fails in CI. Skipping until fixed as tracked by CASSANDRA-14148")
     @since('3.0')
     def test_no_base_column_in_view_pk_complex_timestamp_without_flush(self):
         self._test_no_base_column_in_view_pk_complex_timestamp(flush=False)
@@ -1475,9 +1455,9 @@ class TestMaterializedViews(Tester):
         assert_one(session, "SELECT k,a,b FROM mv WHERE k = 2", [2, 2, 2])
 
         # stop node2, node3
-        debug('Shutdown node2')
+        logger.debug('Shutdown node2')
         node2.stop(wait_other_notice=True)
-        debug('Shutdown node3')
+        logger.debug('Shutdown node3')
         node3.stop(wait_other_notice=True)
         # shadow a = 1, create a = 2
         query = SimpleStatement("UPDATE t USING TIMESTAMP 9 SET a = 2 WHERE k = 1", consistency_level=ConsistencyLevel.ONE)
@@ -1486,33 +1466,33 @@ class TestMaterializedViews(Tester):
         query = SimpleStatement("UPDATE t USING TTL 3 SET a = 2 WHERE k = 2", consistency_level=ConsistencyLevel.ONE)
         self.update_view(session, query, flush)
 
-        debug('Starting node2')
+        logger.debug('Starting node2')
         node2.start(wait_other_notice=True, wait_for_binary_proto=True)
-        debug('Starting node3')
+        logger.debug('Starting node3')
         node3.start(wait_other_notice=True, wait_for_binary_proto=True)
 
         # For k = 1 & a = 1, We should get a digest mismatch of tombstones and repaired
         query = SimpleStatement("SELECT * FROM mv WHERE k = 1 AND a = 1", consistency_level=ConsistencyLevel.ALL)
         result = session.execute(query, trace=True)
         self.check_trace_events(result.get_query_trace(), True)
-        self.assertEqual(0, len(result.current_rows))
+        assert 0 == len(result.current_rows)
 
         # For k = 1 & a = 1, second time no digest mismatch
         result = session.execute(query, trace=True)
         self.check_trace_events(result.get_query_trace(), False)
         assert_none(session, "SELECT * FROM mv WHERE k = 1 AND a = 1")
-        self.assertEqual(0, len(result.current_rows))
+        assert 0 == len(result.current_rows)
 
         # For k = 1 & a = 2, We should get a digest mismatch of data and repaired for a = 2
         query = SimpleStatement("SELECT * FROM mv WHERE k = 1 AND a = 2", consistency_level=ConsistencyLevel.ALL)
         result = session.execute(query, trace=True)
         self.check_trace_events(result.get_query_trace(), True)
-        self.assertEqual(1, len(result.current_rows))
+        assert 1 == len(result.current_rows)
 
         # For k = 1 & a = 2, second time no digest mismatch
         result = session.execute(query, trace=True)
         self.check_trace_events(result.get_query_trace(), False)
-        self.assertEqual(1, len(result.current_rows))
+        assert 1 == len(result.current_rows)
         assert_one(session, "SELECT k,a,b,writetime(b) FROM mv WHERE k = 1", [1, 2, 1, 20])
 
         time.sleep(3)
@@ -1520,13 +1500,13 @@ class TestMaterializedViews(Tester):
         query = SimpleStatement("SELECT * FROM mv WHERE k = 2 AND a = 2", consistency_level=ConsistencyLevel.ALL)
         result = session.execute(query, trace=True)
         self.check_trace_events(result.get_query_trace(), True)
-        debug(result.current_rows)
-        self.assertEqual(0, len(result.current_rows))
+        logger.debug(result.current_rows)
+        assert 0 == len(result.current_rows)
 
         # For k = 2 & a = 2, second time no digest mismatch
         result = session.execute(query, trace=True)
         self.check_trace_events(result.get_query_trace(), False)
-        self.assertEqual(0, len(result.current_rows))
+        assert 0 == len(result.current_rows)
 
     @since('3.0')
     def test_expired_liveness_with_limit_rf1_nodes1(self):
@@ -1555,11 +1535,11 @@ class TestMaterializedViews(Tester):
                          "WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)"))
         session.cluster.control_connection.wait_for_schema_agreement()
 
-        for k in xrange(100):
+        for k in range(100):
             session.execute("INSERT INTO t (k, a, b) VALUES ({}, {}, {})".format(k, k, k))
 
         # generate view rows with expired liveness, except for rows 50 and 99
-        for k in xrange(100):
+        for k in range(100):
             if k == 50 or k == 99:
                 continue
             session.execute("DELETE a FROM t where k = {};".format(k))
@@ -1570,7 +1550,7 @@ class TestMaterializedViews(Tester):
         assert_all(session, "SELECT k,a,b FROM mv", [[50, 50, 50], [99, 99, 99]])
 
         # verify IN
-        keys = xrange(100)
+        keys = range(100)
         assert_one(session, "SELECT k,a,b FROM mv WHERE k in ({}) limit 1".format(', '.join(str(x) for x in keys)),
                    [50, 50, 50])
         assert_all(session, "SELECT k,a,b FROM mv WHERE k in ({}) limit 2".format(', '.join(str(x) for x in keys)),
@@ -1642,7 +1622,7 @@ class TestMaterializedViews(Tester):
         assert_none(session, "SELECT * FROM t_by_v")
         assert_one(session, "SELECT * FROM t", [1, None, None, None])
 
-    def view_tombstone_test(self):
+    def test_view_tombstone(self):
         """
         Test that a materialized view properly tombstones
 
@@ -1690,7 +1670,7 @@ class TestMaterializedViews(Tester):
 
         assert_none(session, "SELECT * FROM t_by_v WHERE v = 1")
 
-        debug('Shutdown node2')
+        logger.debug('Shutdown node2')
         node2.stop(wait_other_notice=True)
 
         session.execute(SimpleStatement("UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1",
@@ -1746,10 +1726,10 @@ class TestMaterializedViews(Tester):
             if expect_digest:
                 self.fail("Didn't find digest mismatch")
 
-    def simple_repair_test_by_base(self):
+    def test_simple_repair_by_base(self):
         self._simple_repair_test(repair_base=True)
 
-    def simple_repair_test_by_view(self):
+    def test_simple_repair_by_view(self):
         self._simple_repair_test(repair_view=True)
 
     def _simple_repair_test(self, repair_base=False, repair_view=False):
@@ -1766,24 +1746,24 @@ class TestMaterializedViews(Tester):
 
         session.cluster.control_connection.wait_for_schema_agreement()
 
-        debug('Shutdown node2')
+        logger.debug('Shutdown node2')
         node2.stop(wait_other_notice=True)
 
-        for i in xrange(1000):
+        for i in range(1000):
             session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
 
         self._replay_batchlogs()
 
-        debug('Verify the data in the MV with CL=ONE')
-        for i in xrange(1000):
+        logger.debug('Verify the data in the MV with CL=ONE')
+        for i in range(1000):
             assert_one(
                 session,
                 "SELECT * FROM t_by_v WHERE v = {}".format(i),
                 [i, i, 'a', 3.0]
             )
 
-        debug('Verify the data in the MV with CL=ALL. All should be unavailable.')
-        for i in xrange(1000):
+        logger.debug('Verify the data in the MV with CL=ALL. All should be unavailable.')
+        for i in range(1000):
             statement = SimpleStatement(
                 "SELECT * FROM t_by_v WHERE v = {}".format(i),
                 consistency_level=ConsistencyLevel.ALL
@@ -1794,27 +1774,27 @@ class TestMaterializedViews(Tester):
                 statement
             )
 
-        debug('Start node2, and repair')
+        logger.debug('Start node2, and repair')
         node2.start(wait_other_notice=True, wait_for_binary_proto=True)
         if repair_base:
             node1.nodetool("repair ks t")
         if repair_view:
             node1.nodetool("repair ks t_by_v")
 
-        debug('Verify the data in the MV with CL=ALL. All should be available now and no digest mismatch')
-        for i in xrange(1000):
+        logger.debug('Verify the data in the MV with CL=ALL. All should be available now and no digest mismatch')
+        for i in range(1000):
             query = SimpleStatement(
                 "SELECT * FROM t_by_v WHERE v = {}".format(i),
                 consistency_level=ConsistencyLevel.ALL
             )
             result = session.execute(query, trace=True)
             self.check_trace_events(result.get_query_trace(), False)
-            self.assertEquals(self._rows_to_list(result.current_rows), [[i, i, 'a', 3.0]])
+            assert self._rows_to_list(result.current_rows) == [[i, i, 'a', 3.0]]
 
-    def base_replica_repair_test(self):
+    def test_base_replica_repair(self):
         self._base_replica_repair_test()
 
-    def base_replica_repair_with_contention_test(self):
+    def test_base_replica_repair_with_contention(self):
         """
         Test repair does not fail when there is MV lock contention
         @jira_ticket CASSANDRA-12905
@@ -1837,14 +1817,14 @@ class TestMaterializedViews(Tester):
 
         session.cluster.control_connection.wait_for_schema_agreement()
 
-        debug('Write initial data')
-        for i in xrange(1000):
+        logger.debug('Write initial data')
+        for i in range(1000):
             session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
 
         self._replay_batchlogs()
 
-        debug('Verify the data in the MV with CL=ALL')
-        for i in xrange(1000):
+        logger.debug('Verify the data in the MV with CL=ALL')
+        for i in range(1000):
             assert_one(
                 session,
                 "SELECT * FROM t_by_v WHERE v = {}".format(i),
@@ -1852,9 +1832,9 @@ class TestMaterializedViews(Tester):
                 cl=ConsistencyLevel.ALL
             )
 
-        debug('Shutdown node1')
+        logger.debug('Shutdown node1')
         node1.stop(wait_other_notice=True)
-        debug('Delete node1 data')
+        logger.debug('Delete node1 data')
         node1.clear(clear_all=True)
 
         jvm_args = []
@@ -1864,44 +1844,43 @@ class TestMaterializedViews(Tester):
             jvm_args.append("-Dcassandra.test.fail_mv_locks_count=1000")
             # this should not make Keyspace.apply throw WTE on failure to acquire lock
             node1.set_configuration_options(values={'write_request_timeout_in_ms': 100})
-        debug('Restarting node1 with jvm_args={}'.format(jvm_args))
+        logger.debug('Restarting node1 with jvm_args={}'.format(jvm_args))
         node1.start(wait_other_notice=True, wait_for_binary_proto=True, jvm_args=jvm_args)
-        debug('Shutdown node2 and node3')
+        logger.debug('Shutdown node2 and node3')
         node2.stop(wait_other_notice=True)
         node3.stop(wait_other_notice=True)
 
         session = self.patient_exclusive_cql_connection(node1)
         session.execute('USE ks')
 
-        debug('Verify that there is no data on node1')
-        for i in xrange(1000):
+        logger.debug('Verify that there is no data on node1')
+        for i in range(1000):
             assert_none(
                 session,
                 "SELECT * FROM t_by_v WHERE v = {}".format(i)
             )
 
-        debug('Restarting node2 and node3')
+        logger.debug('Restarting node2 and node3')
         node2.start(wait_other_notice=True, wait_for_binary_proto=True)
         node3.start(wait_other_notice=True, wait_for_binary_proto=True)
 
         # Just repair the base replica
-        debug('Starting repair on node1')
+        logger.debug('Starting repair on node1')
         node1.nodetool("repair ks t")
 
-        debug('Verify data with cl=ALL')
-        for i in xrange(1000):
+        logger.debug('Verify data with cl=ALL')
+        for i in range(1000):
             assert_one(
                 session,
                 "SELECT * FROM t_by_v WHERE v = {}".format(i),
                 [i, i, 'a', 3.0]
             )
 
-    @attr("resource-intensive")
-    def complex_repair_test(self):
+    @pytest.mark.resource_intensive
+    def test_complex_repair(self):
         """
         Test that a materialized view is consistent after a more complex repair.
         """
-
         session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
         node1, node2, node3, node4, node5 = self.cluster.nodelist()
 
@@ -1913,49 +1892,49 @@ class TestMaterializedViews(Tester):
 
         session.cluster.control_connection.wait_for_schema_agreement()
 
-        debug('Shutdown node2 and node3')
+        logger.debug('Shutdown node2 and node3')
         node2.stop()
         node3.stop(wait_other_notice=True)
 
-        debug('Write initial data to node1 (will be replicated to node4 and node5)')
-        for i in xrange(1000):
+        logger.debug('Write initial data to node1 (will be replicated to node4 and node5)')
+        for i in range(1000):
             session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
 
-        debug('Verify the data in the MV on node1 with CL=ONE')
-        for i in xrange(1000):
+        logger.debug('Verify the data in the MV on node1 with CL=ONE')
+        for i in range(1000):
             assert_one(
                 session,
                 "SELECT * FROM ks.t_by_v WHERE v = {}".format(i),
                 [i, i, 'a', 3.0]
             )
 
-        debug('Close connection to node1')
+        logger.debug('Close connection to node1')
         session.cluster.shutdown()
-        debug('Shutdown node1, node4 and node5')
+        logger.debug('Shutdown node1, node4 and node5')
         node1.stop()
         node4.stop()
         node5.stop()
 
-        debug('Start nodes 2 and 3')
+        logger.debug('Start nodes 2 and 3')
         node2.start()
         node3.start(wait_other_notice=True, wait_for_binary_proto=True)
 
         session2 = self.patient_cql_connection(node2)
 
-        debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
-        for i in xrange(1000):
+        logger.debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
+        for i in range(1000):
             assert_none(
                 session2,
                 "SELECT * FROM ks.t_by_v WHERE v = {}".format(i)
             )
 
-        debug('Write new data in node2 and node3 that overlap those in node1, node4 and node5')
-        for i in xrange(1000):
+        logger.debug('Write new data in node2 and node3 that overlap those in node1, node4 and node5')
+        for i in range(1000):
             # we write i*2 as value, instead of i
             session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i * 2))
 
-        debug('Verify the new data in the MV on node2 with CL=ONE')
-        for i in xrange(1000):
+        logger.debug('Verify the new data in the MV on node2 with CL=ONE')
+        for i in range(1000):
             v = i * 2
             assert_one(
                 session2,
@@ -1963,18 +1942,18 @@ class TestMaterializedViews(Tester):
                 [v, v, 'a', 3.0]
             )
 
-        debug('Wait for batchlogs to expire from node2 and node3')
+        logger.debug('Wait for batchlogs to expire from node2 and node3')
         time.sleep(5)
 
-        debug('Start remaining nodes')
+        logger.debug('Start remaining nodes')
         node1.start(wait_other_notice=True, wait_for_binary_proto=True)
         node4.start(wait_other_notice=True, wait_for_binary_proto=True)
         node5.start(wait_other_notice=True, wait_for_binary_proto=True)
 
         session = self.patient_cql_connection(node1)
 
-        debug('Read data from MV at QUORUM (old data should be returned)')
-        for i in xrange(1000):
+        logger.debug('Read data from MV at QUORUM (old data should be returned)')
+        for i in range(1000):
             assert_one(
                 session,
                 "SELECT * FROM ks.t_by_v WHERE v = {}".format(i),
@@ -1982,11 +1961,11 @@ class TestMaterializedViews(Tester):
                 cl=ConsistencyLevel.QUORUM
             )
 
-        debug('Run global repair on node1')
+        logger.debug('Run global repair on node1')
         node1.repair()
 
-        debug('Read data from MV at quorum (new data should be returned after repair)')
-        for i in xrange(1000):
+        logger.debug('Read data from MV at quorum (new data should be returned after repair)')
+        for i in range(1000):
             v = i * 2
             assert_one(
                 session,
@@ -1995,8 +1974,8 @@ class TestMaterializedViews(Tester):
                 cl=ConsistencyLevel.QUORUM
             )
 
-    @attr('resource-intensive')
-    def throttled_partition_update_test(self):
+    @pytest.mark.resource_intensive
+    def test_throttled_partition_update(self):
         """
         @jira_ticket: CASSANDRA-13299, test breaking up a large partition when repairing the base table with an MV.
 
@@ -2017,7 +1996,7 @@ class TestMaterializedViews(Tester):
 
         session.cluster.control_connection.wait_for_schema_agreement()
 
-        debug('Shutdown node2 and node3')
+        logger.debug('Shutdown node2 and node3')
         node2.stop(wait_other_notice=True)
         node3.stop(wait_other_notice=True)
 
@@ -2025,26 +2004,26 @@ class TestMaterializedViews(Tester):
         range_deletion_ts = 30
         partition_deletion_ts = 10
 
-        for ck1 in xrange(size):
-            for ck2 in xrange(size):
+        for ck1 in range(size):
+            for ck2 in range(size):
                 session.execute("INSERT INTO ks.t (pk, ck1, ck2, v1, v2)"
                                 " VALUES (1, {}, {}, {}, {}) USING TIMESTAMP {}".format(ck1, ck2, ck1, ck2, ck1))
 
         self._replay_batchlogs()
 
-        for ck1 in xrange(size):
-            for ck2 in xrange(size):
+        for ck1 in range(size):
+            for ck2 in range(size):
                 assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=1 AND ck1={} AND ck2={}".format(ck1, ck2),
                            [1, ck1, ck2, ck1, ck2])
                 assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=1 AND ck1={} AND ck2={}".format(ck1, ck2),
                            [1, ck1, ck2, ck1, ck2])
 
-        debug('Shutdown node4 and node5')
+        logger.debug('Shutdown node4 and node5')
         node4.stop(wait_other_notice=True)
         node5.stop(wait_other_notice=True)
 
-        for ck1 in xrange(size):
-            for ck2 in xrange(size):
+        for ck1 in range(size):
+            for ck2 in range(size):
                 if ck1 % 2 == 0:  # range tombstone
                     session.execute("DELETE FROM ks.t USING TIMESTAMP 50 WHERE pk=1 AND ck1={}".format(ck1))
                 elif ck1 == ck2:  # row tombstone
@@ -2061,27 +2040,27 @@ class TestMaterializedViews(Tester):
         self._replay_batchlogs()
 
         # start nodes with different batch size
-        debug('Starting nodes')
+        logger.debug('Starting nodes')
         node2.start(wait_other_notice=True, wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(2)])
         node3.start(wait_other_notice=True, wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(5)])
         node4.start(wait_other_notice=True, wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(50)])
         node5.start(wait_other_notice=True, wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(5000)])
         self._replay_batchlogs()
 
-        debug('repairing base table')
+        logger.debug('repairing base table')
         node1.nodetool("repair ks t")
         self._replay_batchlogs()
 
-        debug('stop cluster')
+        logger.debug('stop cluster')
         self.cluster.stop()
 
-        debug('rolling restart to check repaired data on each node')
+        logger.debug('rolling restart to check repaired data on each node')
         for node in self.cluster.nodelist():
-            debug('starting {}'.format(node.name))
+            logger.debug('starting {}'.format(node.name))
             node.start(wait_other_notice=True, wait_for_binary_proto=True)
             session = self.patient_cql_connection(node, consistency_level=ConsistencyLevel.ONE)
-            for ck1 in xrange(size):
-                for ck2 in xrange(size):
+            for ck1 in range(size):
+                for ck2 in range(size):
                     if (
                         ck1 <= partition_deletion_ts or  # partition deletion
                         ck1 == ck2 or ck1 % 2 == 0 or  # row deletion or range tombstone
@@ -2101,15 +2080,14 @@ class TestMaterializedViews(Tester):
                                             "ck1={} AND ck2={}".format(ck1, ck2), [1, ck1, ck2, ck1, ck2])
                         assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=1 AND "
                                             "ck1={} AND ck2={}".format(ck1, ck2), [1, ck1, ck2, ck1, ck2])
-            debug('stopping {}'.format(node.name))
+            logger.debug('stopping {}'.format(node.name))
             node.stop(wait_other_notice=True, wait_for_binary_proto=True)
 
-    @attr('resource-intensive')
-    def really_complex_repair_test(self):
+    @pytest.mark.resource_intensive
+    def test_really_complex_repair(self):
         """
         Test that a materialized view is consistent after a more complex repair.
         """
-
         session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
         node1, node2, node3, node4, node5 = self.cluster.nodelist()
 
@@ -2122,40 +2100,40 @@ class TestMaterializedViews(Tester):
 
         session.cluster.control_connection.wait_for_schema_agreement()
 
-        debug('Shutdown node2 and node3')
+        logger.debug('Shutdown node2 and node3')
         node2.stop(wait_other_notice=True)
         node3.stop(wait_other_notice=True)
 
         session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0)")
         session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'a', 3.0)")
         self._replay_batchlogs()
-        debug('Verify the data in the MV on node1 with CL=ONE')
+        logger.debug('Verify the data in the MV on node1 with CL=ONE')
         assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]])
 
         session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'b', 3.0)")
         session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'b', 3.0)")
         self._replay_batchlogs()
-        debug('Verify the data in the MV on node1 with CL=ONE')
+        logger.debug('Verify the data in the MV on node1 with CL=ONE')
         assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'b'", [['b', 1, 1, 3.0], ['b', 2, 2, 3.0]])
 
         session.shutdown()
 
-        debug('Shutdown node1, node4 and node5')
+        logger.debug('Shutdown node1, node4 and node5')
         node1.stop()
         node4.stop()
         node5.stop()
 
-        debug('Start nodes 2 and 3')
+        logger.debug('Start nodes 2 and 3')
         node2.start()
         node3.start(wait_other_notice=True, wait_for_binary_proto=True)
 
         session2 = self.patient_cql_connection(node2)
         session2.execute('USE ks')
 
-        debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
+        logger.debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
         assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'")
 
-        debug('Write new data in node2 that overlap those in node1')
+        logger.debug('Write new data in node2 that overlap those in node1')
         session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'c', 3.0)")
         session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'c', 3.0)")
         self._replay_batchlogs()
@@ -2166,17 +2144,17 @@ class TestMaterializedViews(Tester):
         self._replay_batchlogs()
         assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'", [['d', 1, 1, 3.0], ['d', 2, 2, 3.0]])
 
-        debug("Composite delete of everything")
+        logger.debug("Composite delete of everything")
         session2.execute("DELETE FROM ks.t WHERE id = 1 and v = 1")
         session2.execute("DELETE FROM ks.t WHERE id = 2 and v = 2")
         self._replay_batchlogs()
         assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'")
         assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'")
 
-        debug('Wait for batchlogs to expire from node2 and node3')
+        logger.debug('Wait for batchlogs to expire from node2 and node3')
         time.sleep(5)
 
-        debug('Start remaining nodes')
+        logger.debug('Start remaining nodes')
         node1.start(wait_other_notice=True, wait_for_binary_proto=True)
         node4.start(wait_other_notice=True, wait_for_binary_proto=True)
         node5.start(wait_other_notice=True, wait_for_binary_proto=True)
@@ -2189,23 +2167,22 @@ class TestMaterializedViews(Tester):
             cl=ConsistencyLevel.QUORUM
         )
 
-        debug('Run global repair on node1')
+        logger.debug('Run global repair on node1')
         node1.repair()
 
         assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", cl=ConsistencyLevel.QUORUM)
 
-    def complex_mv_select_statements_test(self):
+    def test_complex_mv_select_statements(self):
         """
         Test complex MV select statements
         @jira_ticket CASSANDRA-9664
         """
-
         cluster = self.cluster
         cluster.populate(3).start()
         node1 = cluster.nodelist()[0]
         session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)
 
-        debug("Creating keyspace")
+        logger.debug("Creating keyspace")
         session.execute("CREATE KEYSPACE mvtest WITH replication = "
                         "{'class': 'SimpleStrategy', 'replication_factor': '3'}")
         session.execute('USE mvtest')
@@ -2240,7 +2217,7 @@ class TestMaterializedViews(Tester):
             for row in rows:
                 session.execute(insert_stmt, row)
 
-            debug("Testing MV primary key: {}".format(mv_primary_key))
+            logger.debug("Testing MV primary key: {}".format(mv_primary_key))
 
             session.execute("CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE "
                             "a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}".format(mv_primary_key))
@@ -2352,12 +2329,12 @@ class TestMaterializedViews(Tester):
         cluster.start()
 
         # node3 should have received and ignored the creation of the MV over the dropped table
-        self.assertTrue(node3.grep_log('Not adding view users_by_state because the base table'))
+        assert node3.grep_log('Not adding view users_by_state because the base table')
 
-    def base_view_consistency_on_failure_after_mv_apply_test(self):
+    def test_base_view_consistency_on_failure_after_mv_apply(self):
         self._test_base_view_consistency_on_crash("after")
 
-    def base_view_consistency_on_failure_before_mv_apply_test(self):
+    def test_base_view_consistency_on_failure_before_mv_apply(self):
         self._test_base_view_consistency_on_crash("before")
 
     def _test_base_view_consistency_on_crash(self, fail_phase):
@@ -2370,7 +2347,7 @@ class TestMaterializedViews(Tester):
         """
 
         self.cluster.set_batch_commitlog(enabled=True)
-        self.ignore_log_patterns = [r'Dummy failure', r"Failed to force-recycle all segments"]
+        self.fixture_dtest_setup.ignore_log_patterns = [r'Dummy failure', r"Failed to force-recycle all segments"]
         self.prepare(rf=1, install_byteman=True)
         node1, node2, node3 = self.cluster.nodelist()
         session = self.patient_exclusive_cql_connection(node1)
@@ -2382,25 +2359,25 @@ class TestMaterializedViews(Tester):
 
         session.cluster.control_connection.wait_for_schema_agreement()
 
-        debug('Make node1 fail {} view writes'.format(fail_phase))
+        logger.debug('Make node1 fail {} view writes'.format(fail_phase))
         node1.byteman_submit(['./byteman/fail_{}_view_write.btm'.format(fail_phase)])
 
-        debug('Write 1000 rows - all node1 writes should fail')
+        logger.debug('Write 1000 rows - all node1 writes should fail')
 
         failed = False
-        for i in xrange(1, 1000):
+        for i in range(1, 1000):
             try:
                 session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) USING TIMESTAMP {v}".format(v=i))
             except WriteFailure:
                 failed = True
 
-        self.assertTrue(failed, "Should fail at least once.")
-        self.assertTrue(node1.grep_log("Dummy failure"), "Should throw Dummy failure")
+        assert failed, "Should fail at least once."
+        assert node1.grep_log("Dummy failure"), "Should throw Dummy failure"
 
         missing_entries = 0
         session = self.patient_exclusive_cql_connection(node1)
         session.execute('USE ks')
-        for i in xrange(1, 1000):
+        for i in range(1, 1000):
             view_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t_by_v WHERE id = {} AND v = {}".format(i, i),
                                                       consistency_level=ConsistencyLevel.ONE)))
             base_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t WHERE id = {}".format(i),
@@ -2411,29 +2388,29 @@ class TestMaterializedViews(Tester):
             if not view_entry:
                 missing_entries += 1
 
-        debug("Missing entries {}".format(missing_entries))
-        self.assertTrue(missing_entries > 0, )
+        logger.debug("Missing entries {}".format(missing_entries))
+        assert missing_entries > 0
 
-        debug('Restarting node1 to ensure commit log is replayed')
+        logger.debug('Restarting node1 to ensure commit log is replayed')
         node1.stop(wait_other_notice=True)
         # Set batchlog.replay_timeout_in_ms=1 so we can ensure the batchlog will be replayed below
         node1.start(jvm_args=["-Dcassandra.batchlog.replay_timeout_in_ms=1"])
 
-        debug('Replay batchlogs')
+        logger.debug('Replay batchlogs')
         time.sleep(0.001)  # Wait batchlog.replay_timeout_in_ms=1 (ms)
         self._replay_batchlogs()
 
-        debug('Verify that both the base table entry and view are present after commit and batchlog replay')
+        logger.debug('Verify that both the base table entry and view are present after commit and batchlog replay')
         session = self.patient_exclusive_cql_connection(node1)
         session.execute('USE ks')
-        for i in xrange(1, 1000):
+        for i in range(1, 1000):
             view_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t_by_v WHERE id = {} AND v = {}".format(i, i),
                                                       consistency_level=ConsistencyLevel.ONE)))
             base_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t WHERE id = {}".format(i),
                                                       consistency_level=ConsistencyLevel.ONE)))
 
-            self.assertTrue(base_entry, "Both base {} and view entry {} should exist.".format(base_entry, view_entry))
-            self.assertTrue(view_entry, "Both base {} and view entry {} should exist.".format(base_entry, view_entry))
+            assert base_entry, "Both base {} and view entry {} should exist.".format(base_entry, view_entry)
+            assert view_entry, "Both base {} and view entry {} should exist.".format(base_entry, view_entry)
 
 
 # For read verification
@@ -2513,7 +2490,7 @@ SimpleRow = collections.namedtuple('SimpleRow', 'a b c d')
 
 
 def row_generate(i, num_partitions):
-    return SimpleRow(a=i % num_partitions, b=(i % 400) / num_partitions, c=i, d=i)
+    return SimpleRow(a=i % num_partitions, b=(i % 400) // num_partitions, c=i, d=i)
 
 
 # Create a threaded session and execute queries from a Queue
@@ -2547,12 +2524,12 @@ def thread_session(ip, queue, start, end, rows, num_partitions):
             ret = execute_query(session, select_gi, i)
             queue.put_nowait(ret)
     except Exception as e:
-        print str(e)
+        print(str(e))
         queue.close()
 
 
 @since('3.0')
-@skipIf(sys.platform == 'win32', 'Bug in python on Windows: https://bugs.python.org/issue10128')
+@pytest.mark.skipif(sys.platform == 'win32', reason='Bug in python on Windows: https://bugs.python.org/issue10128')
 class TestMaterializedViewsConsistency(Tester):
 
     def prepare(self, user_table=False):
@@ -2569,14 +2546,14 @@ class TestMaterializedViewsConsistency(Tester):
         self.rows = {}
         self.up

<TRUNCATED>
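
Taken together, the materialized_views_test.py hunks above apply one mechanical conversion pattern throughout: a module-level logger replaces the old debug() helper, xrange becomes range, unittest-style self.assertEqual()/self.assertTrue() calls become bare assert statements, nose @attr(...) decorators become @pytest.mark.* markers, and test methods move from the *_test suffix to the test_* prefix that pytest collects. A condensed before/after sketch of the pattern (the class, loop, and assertion below are illustrative only, not lifted from the diff):

```python
import logging

import pytest

logger = logging.getLogger(__name__)


class TestExample:
    # nose / python2 style, as it looked before this commit:
    #
    #     @attr('resource-intensive')
    #     def complex_repair_test(self):
    #         debug('Verifying rows')
    #         for i in xrange(10):
    #             self.assertTrue(i < 10)
    #
    # pytest / python3 style, as migrated:
    @pytest.mark.resource_intensive
    def test_complex_repair(self):      # test_* prefix so pytest collects it
        logger.debug('Verifying rows')  # stdlib logging instead of debug()
        for i in range(10):             # xrange does not exist in python3
            assert i < 10               # bare assert instead of assertTrue
```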



[13/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/thrift_bindings/v22/Cassandra-remote
----------------------------------------------------------------------
diff --git a/thrift_bindings/v22/Cassandra-remote b/thrift_bindings/v22/Cassandra-remote
deleted file mode 100755
index 941d5a4..0000000
--- a/thrift_bindings/v22/Cassandra-remote
+++ /dev/null
@@ -1,396 +0,0 @@
-#!/usr/bin/env python
-#
-# Autogenerated by Thrift Compiler (0.9.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-#  options string: py
-#
-
-import sys
-import pprint
-from urlparse import urlparse
-from thrift.transport import TTransport
-from thrift.transport import TSocket
-from thrift.transport import THttpClient
-from thrift.protocol import TBinaryProtocol
-
-import Cassandra
-from ttypes import *
-
-if len(sys.argv) <= 1 or sys.argv[1] == '--help':
-  print ''
-  print 'Usage: ' + sys.argv[0] + ' [-h host[:port]] [-u url] [-f[ramed]] function [arg1 [arg2...]]'
-  print ''
-  print 'Functions:'
-  print '  void login(AuthenticationRequest auth_request)'
-  print '  void set_keyspace(string keyspace)'
-  print '  ColumnOrSuperColumn get(string key, ColumnPath column_path, ConsistencyLevel consistency_level)'
-  print '   get_slice(string key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)'
-  print '  i32 get_count(string key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)'
-  print '   multiget_slice( keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)'
-  print '   multiget_count( keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)'
-  print '   get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)'
-  print '   get_paged_slice(string column_family, KeyRange range, string start_column, ConsistencyLevel consistency_level)'
-  print '   get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level)'
-  print '  void insert(string key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)'
-  print '  void add(string key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level)'
-  print '  CASResult cas(string key, string column_family,  expected,  updates, ConsistencyLevel serial_consistency_level, ConsistencyLevel commit_consistency_level)'
-  print '  void remove(string key, ColumnPath column_path, i64 timestamp, ConsistencyLevel consistency_level)'
-  print '  void remove_counter(string key, ColumnPath path, ConsistencyLevel consistency_level)'
-  print '  void batch_mutate( mutation_map, ConsistencyLevel consistency_level)'
-  print '  void atomic_batch_mutate( mutation_map, ConsistencyLevel consistency_level)'
-  print '  void truncate(string cfname)'
-  print '   get_multi_slice(MultiSliceRequest request)'
-  print '   describe_schema_versions()'
-  print '   describe_keyspaces()'
-  print '  string describe_cluster_name()'
-  print '  string describe_version()'
-  print '   describe_ring(string keyspace)'
-  print '   describe_local_ring(string keyspace)'
-  print '   describe_token_map()'
-  print '  string describe_partitioner()'
-  print '  string describe_snitch()'
-  print '  KsDef describe_keyspace(string keyspace)'
-  print '   describe_splits(string cfName, string start_token, string end_token, i32 keys_per_split)'
-  print '  string trace_next_query()'
-  print '   describe_splits_ex(string cfName, string start_token, string end_token, i32 keys_per_split)'
-  print '  string system_add_column_family(CfDef cf_def)'
-  print '  string system_drop_column_family(string column_family)'
-  print '  string system_add_keyspace(KsDef ks_def)'
-  print '  string system_drop_keyspace(string keyspace)'
-  print '  string system_update_keyspace(KsDef ks_def)'
-  print '  string system_update_column_family(CfDef cf_def)'
-  print '  CqlResult execute_cql_query(string query, Compression compression)'
-  print '  CqlResult execute_cql3_query(string query, Compression compression, ConsistencyLevel consistency)'
-  print '  CqlPreparedResult prepare_cql_query(string query, Compression compression)'
-  print '  CqlPreparedResult prepare_cql3_query(string query, Compression compression)'
-  print '  CqlResult execute_prepared_cql_query(i32 itemId,  values)'
-  print '  CqlResult execute_prepared_cql3_query(i32 itemId,  values, ConsistencyLevel consistency)'
-  print '  void set_cql_version(string version)'
-  print ''
-  sys.exit(0)
-
-pp = pprint.PrettyPrinter(indent = 2)
-host = 'localhost'
-port = 9090
-uri = ''
-framed = False
-http = False
-argi = 1
-
-if sys.argv[argi] == '-h':
-  parts = sys.argv[argi+1].split(':')
-  host = parts[0]
-  if len(parts) > 1:
-    port = int(parts[1])
-  argi += 2
-
-if sys.argv[argi] == '-u':
-  url = urlparse(sys.argv[argi+1])
-  parts = url[1].split(':')
-  host = parts[0]
-  if len(parts) > 1:
-    port = int(parts[1])
-  else:
-    port = 80
-  uri = url[2]
-  if url[4]:
-    uri += '?%s' % url[4]
-  http = True
-  argi += 2
-
-if sys.argv[argi] == '-f' or sys.argv[argi] == '-framed':
-  framed = True
-  argi += 1
-
-cmd = sys.argv[argi]
-args = sys.argv[argi+1:]
-
-if http:
-  transport = THttpClient.THttpClient(host, port, uri)
-else:
-  socket = TSocket.TSocket(host, port)
-  if framed:
-    transport = TTransport.TFramedTransport(socket)
-  else:
-    transport = TTransport.TBufferedTransport(socket)
-protocol = TBinaryProtocol.TBinaryProtocol(transport)
-client = Cassandra.Client(protocol)
-transport.open()
-
-if cmd == 'login':
-  if len(args) != 1:
-    print 'login requires 1 args'
-    sys.exit(1)
-  pp.pprint(client.login(eval(args[0]),))
-
-elif cmd == 'set_keyspace':
-  if len(args) != 1:
-    print 'set_keyspace requires 1 args'
-    sys.exit(1)
-  pp.pprint(client.set_keyspace(args[0],))
-
-elif cmd == 'get':
-  if len(args) != 3:
-    print 'get requires 3 args'
-    sys.exit(1)
-  pp.pprint(client.get(args[0],eval(args[1]),eval(args[2]),))
-
-elif cmd == 'get_slice':
-  if len(args) != 4:
-    print 'get_slice requires 4 args'
-    sys.exit(1)
-  pp.pprint(client.get_slice(args[0],eval(args[1]),eval(args[2]),eval(args[3]),))
-
-elif cmd == 'get_count':
-  if len(args) != 4:
-    print 'get_count requires 4 args'
-    sys.exit(1)
-  pp.pprint(client.get_count(args[0],eval(args[1]),eval(args[2]),eval(args[3]),))
-
-elif cmd == 'multiget_slice':
-  if len(args) != 4:
-    print 'multiget_slice requires 4 args'
-    sys.exit(1)
-  pp.pprint(client.multiget_slice(eval(args[0]),eval(args[1]),eval(args[2]),eval(args[3]),))
-
-elif cmd == 'multiget_count':
-  if len(args) != 4:
-    print 'multiget_count requires 4 args'
-    sys.exit(1)
-  pp.pprint(client.multiget_count(eval(args[0]),eval(args[1]),eval(args[2]),eval(args[3]),))
-
-elif cmd == 'get_range_slices':
-  if len(args) != 4:
-    print 'get_range_slices requires 4 args'
-    sys.exit(1)
-  pp.pprint(client.get_range_slices(eval(args[0]),eval(args[1]),eval(args[2]),eval(args[3]),))
-
-elif cmd == 'get_paged_slice':
-  if len(args) != 4:
-    print 'get_paged_slice requires 4 args'
-    sys.exit(1)
-  pp.pprint(client.get_paged_slice(args[0],eval(args[1]),args[2],eval(args[3]),))
-
-elif cmd == 'get_indexed_slices':
-  if len(args) != 4:
-    print 'get_indexed_slices requires 4 args'
-    sys.exit(1)
-  pp.pprint(client.get_indexed_slices(eval(args[0]),eval(args[1]),eval(args[2]),eval(args[3]),))
-
-elif cmd == 'insert':
-  if len(args) != 4:
-    print 'insert requires 4 args'
-    sys.exit(1)
-  pp.pprint(client.insert(args[0],eval(args[1]),eval(args[2]),eval(args[3]),))
-
-elif cmd == 'add':
-  if len(args) != 4:
-    print 'add requires 4 args'
-    sys.exit(1)
-  pp.pprint(client.add(args[0],eval(args[1]),eval(args[2]),eval(args[3]),))
-
-elif cmd == 'cas':
-  if len(args) != 6:
-    print 'cas requires 6 args'
-    sys.exit(1)
-  pp.pprint(client.cas(args[0],args[1],eval(args[2]),eval(args[3]),eval(args[4]),eval(args[5]),))
-
-elif cmd == 'remove':
-  if len(args) != 4:
-    print 'remove requires 4 args'
-    sys.exit(1)
-  pp.pprint(client.remove(args[0],eval(args[1]),eval(args[2]),eval(args[3]),))
-
-elif cmd == 'remove_counter':
-  if len(args) != 3:
-    print 'remove_counter requires 3 args'
-    sys.exit(1)
-  pp.pprint(client.remove_counter(args[0],eval(args[1]),eval(args[2]),))
-
-elif cmd == 'batch_mutate':
-  if len(args) != 2:
-    print 'batch_mutate requires 2 args'
-    sys.exit(1)
-  pp.pprint(client.batch_mutate(eval(args[0]),eval(args[1]),))
-
-elif cmd == 'atomic_batch_mutate':
-  if len(args) != 2:
-    print 'atomic_batch_mutate requires 2 args'
-    sys.exit(1)
-  pp.pprint(client.atomic_batch_mutate(eval(args[0]),eval(args[1]),))
-
-elif cmd == 'truncate':
-  if len(args) != 1:
-    print 'truncate requires 1 args'
-    sys.exit(1)
-  pp.pprint(client.truncate(args[0],))
-
-elif cmd == 'get_multi_slice':
-  if len(args) != 1:
-    print 'get_multi_slice requires 1 args'
-    sys.exit(1)
-  pp.pprint(client.get_multi_slice(eval(args[0]),))
-
-elif cmd == 'describe_schema_versions':
-  if len(args) != 0:
-    print 'describe_schema_versions requires 0 args'
-    sys.exit(1)
-  pp.pprint(client.describe_schema_versions())
-
-elif cmd == 'describe_keyspaces':
-  if len(args) != 0:
-    print 'describe_keyspaces requires 0 args'
-    sys.exit(1)
-  pp.pprint(client.describe_keyspaces())
-
-elif cmd == 'describe_cluster_name':
-  if len(args) != 0:
-    print 'describe_cluster_name requires 0 args'
-    sys.exit(1)
-  pp.pprint(client.describe_cluster_name())
-
-elif cmd == 'describe_version':
-  if len(args) != 0:
-    print 'describe_version requires 0 args'
-    sys.exit(1)
-  pp.pprint(client.describe_version())
-
-elif cmd == 'describe_ring':
-  if len(args) != 1:
-    print 'describe_ring requires 1 args'
-    sys.exit(1)
-  pp.pprint(client.describe_ring(args[0],))
-
-elif cmd == 'describe_local_ring':
-  if len(args) != 1:
-    print 'describe_local_ring requires 1 args'
-    sys.exit(1)
-  pp.pprint(client.describe_local_ring(args[0],))
-
-elif cmd == 'describe_token_map':
-  if len(args) != 0:
-    print 'describe_token_map requires 0 args'
-    sys.exit(1)
-  pp.pprint(client.describe_token_map())
-
-elif cmd == 'describe_partitioner':
-  if len(args) != 0:
-    print 'describe_partitioner requires 0 args'
-    sys.exit(1)
-  pp.pprint(client.describe_partitioner())
-
-elif cmd == 'describe_snitch':
-  if len(args) != 0:
-    print 'describe_snitch requires 0 args'
-    sys.exit(1)
-  pp.pprint(client.describe_snitch())
-
-elif cmd == 'describe_keyspace':
-  if len(args) != 1:
-    print 'describe_keyspace requires 1 args'
-    sys.exit(1)
-  pp.pprint(client.describe_keyspace(args[0],))
-
-elif cmd == 'describe_splits':
-  if len(args) != 4:
-    print 'describe_splits requires 4 args'
-    sys.exit(1)
-  pp.pprint(client.describe_splits(args[0],args[1],args[2],eval(args[3]),))
-
-elif cmd == 'trace_next_query':
-  if len(args) != 0:
-    print 'trace_next_query requires 0 args'
-    sys.exit(1)
-  pp.pprint(client.trace_next_query())
-
-elif cmd == 'describe_splits_ex':
-  if len(args) != 4:
-    print 'describe_splits_ex requires 4 args'
-    sys.exit(1)
-  pp.pprint(client.describe_splits_ex(args[0],args[1],args[2],eval(args[3]),))
-
-elif cmd == 'system_add_column_family':
-  if len(args) != 1:
-    print 'system_add_column_family requires 1 args'
-    sys.exit(1)
-  pp.pprint(client.system_add_column_family(eval(args[0]),))
-
-elif cmd == 'system_drop_column_family':
-  if len(args) != 1:
-    print 'system_drop_column_family requires 1 args'
-    sys.exit(1)
-  pp.pprint(client.system_drop_column_family(args[0],))
-
-elif cmd == 'system_add_keyspace':
-  if len(args) != 1:
-    print 'system_add_keyspace requires 1 args'
-    sys.exit(1)
-  pp.pprint(client.system_add_keyspace(eval(args[0]),))
-
-elif cmd == 'system_drop_keyspace':
-  if len(args) != 1:
-    print 'system_drop_keyspace requires 1 args'
-    sys.exit(1)
-  pp.pprint(client.system_drop_keyspace(args[0],))
-
-elif cmd == 'system_update_keyspace':
-  if len(args) != 1:
-    print 'system_update_keyspace requires 1 args'
-    sys.exit(1)
-  pp.pprint(client.system_update_keyspace(eval(args[0]),))
-
-elif cmd == 'system_update_column_family':
-  if len(args) != 1:
-    print 'system_update_column_family requires 1 args'
-    sys.exit(1)
-  pp.pprint(client.system_update_column_family(eval(args[0]),))
-
-elif cmd == 'execute_cql_query':
-  if len(args) != 2:
-    print 'execute_cql_query requires 2 args'
-    sys.exit(1)
-  pp.pprint(client.execute_cql_query(args[0],eval(args[1]),))
-
-elif cmd == 'execute_cql3_query':
-  if len(args) != 3:
-    print 'execute_cql3_query requires 3 args'
-    sys.exit(1)
-  pp.pprint(client.execute_cql3_query(args[0],eval(args[1]),eval(args[2]),))
-
-elif cmd == 'prepare_cql_query':
-  if len(args) != 2:
-    print 'prepare_cql_query requires 2 args'
-    sys.exit(1)
-  pp.pprint(client.prepare_cql_query(args[0],eval(args[1]),))
-
-elif cmd == 'prepare_cql3_query':
-  if len(args) != 2:
-    print 'prepare_cql3_query requires 2 args'
-    sys.exit(1)
-  pp.pprint(client.prepare_cql3_query(args[0],eval(args[1]),))
-
-elif cmd == 'execute_prepared_cql_query':
-  if len(args) != 2:
-    print 'execute_prepared_cql_query requires 2 args'
-    sys.exit(1)
-  pp.pprint(client.execute_prepared_cql_query(eval(args[0]),eval(args[1]),))
-
-elif cmd == 'execute_prepared_cql3_query':
-  if len(args) != 3:
-    print 'execute_prepared_cql3_query requires 3 args'
-    sys.exit(1)
-  pp.pprint(client.execute_prepared_cql3_query(eval(args[0]),eval(args[1]),eval(args[2]),))
-
-elif cmd == 'set_cql_version':
-  if len(args) != 1:
-    print 'set_cql_version requires 1 args'
-    sys.exit(1)
-  pp.pprint(client.set_cql_version(args[0],))
-
-else:
-  print 'Unrecognized method %s' % cmd
-  sys.exit(1)
-
-transport.close()
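
The v22 Cassandra-remote script deleted above is python2-only code: it uses the print statement and imports urlparse from its python2 location, so python3 cannot even parse it. Per the diffstat in [36/36] below, it is superseded by bindings regenerated under thrift_bindings/thrift010/. For reference, the python3 equivalents of the two incompatible constructs (a hedged illustration, not an excerpt from the regenerated file):

```python
# python2, as in the deleted script:
#     from urlparse import urlparse
#     print 'Unrecognized method %s' % cmd
# python3 equivalents:
from urllib.parse import urlparse

cmd = 'login'  # placeholder value, for illustration only
print('Unrecognized method %s' % cmd)
```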




[36/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
Migrate dtests to use pytest and python3

Patch by Michael Kjellman; Reviewed by Ariel Weisberg for CASSANDRA-14134


Project: http://git-wip-us.apache.org/repos/asf/cassandra-dtest/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra-dtest/commit/49b2dda4
Tree: http://git-wip-us.apache.org/repos/asf/cassandra-dtest/tree/49b2dda4
Diff: http://git-wip-us.apache.org/repos/asf/cassandra-dtest/diff/49b2dda4

Branch: refs/heads/master
Commit: 49b2dda4e6643d2b18376d504b5fea4c0b3354a7
Parents: f4eda3a
Author: Michael Kjellman <kj...@apple.com>
Authored: Thu Jan 25 15:56:18 2018 -0500
Committer: Ariel Weisberg <aw...@apple.com>
Committed: Mon Jan 29 15:37:06 2018 -0500

----------------------------------------------------------------------
 CONTRIBUTING.md                                |     2 +-
 INSTALL.md                                     |   134 -
 README.md                                      |   224 +-
 auth_test.py                                   |   323 +-
 batch_test.py                                  |   130 +-
 bin/collect_known_failures.py                  |    58 -
 bootstrap_test.py                              |   193 +-
 cassandra-thrift/v11/Cassandra.py              |   336 +-
 cassandra-thrift/v11/constants.py              |     2 +-
 cassandra-thrift/v11/ttypes.py                 |   124 +-
 cdc_test.py                                    |   137 +-
 cfid_test.py                                   |    13 +-
 commitlog_test.py                              |   250 +-
 compaction_test.py                             |   198 +-
 compression_test.py                            |    50 +-
 concurrent_schema_changes_test.py              |   118 +-
 configuration_test.py                          |    69 +-
 conftest.py                                    |   489 +
 consistency_test.py                            |   191 +-
 consistent_bootstrap_test.py                   |    74 +-
 counter_test.py                                |   417 +
 counter_tests.py                               |   414 -
 cql_prepared_test.py                           |     8 +-
 cql_test.py                                    |  1503 +++
 cql_tests.py                                   |  1501 ---
 cql_tracing_test.py                            |    81 +-
 cqlsh_tests/cqlsh_copy_tests.py                |   603 +-
 cqlsh_tests/cqlsh_tests.py                     |   325 +-
 cqlsh_tests/cqlsh_tools.py                     |     3 +-
 delete_insert_test.py                          |    17 +-
 deletion_test.py                               |    20 +-
 disk_balance_test.py                           |   117 +-
 dtest.py                                       |   986 +-
 dtest_setup.py                                 |   498 +
 dtest_setup_overrides.py                       |     3 +
 global_row_key_cache_test.py                   |    48 +-
 hintedhandoff_test.py                          |    58 +-
 internode_ssl_test.py                          |    16 +-
 jmx_auth_test.py                               |    19 +-
 jmx_test.py                                    |   158 +-
 json_test.py                                   |    91 +-
 json_tools_test.py                             |    56 +-
 largecolumn_test.py                            |    30 +-
 materialized_views_test.py                     |   796 +-
 meta_tests/assertion_test.py                   |    12 +-
 meta_tests/utils_test/funcutils_test.py        |     8 +-
 meta_tests/utils_test/metadata_wrapper_test.py |    49 +-
 metadata_test.py                               |    68 +
 metadata_tests.py                              |    65 -
 mixed_version_test.py                          |    28 +-
 multidc_putget_test.py                         |     8 +-
 native_transport_ssl_test.py                   |    32 +-
 nodetool_test.py                               |   107 +-
 offline_tools_test.py                          |   216 +-
 paging_test.py                                 |  1408 ++-
 paxos_test.py                                  |   195 +
 paxos_tests.py                                 |   192 -
 pending_range_test.py                          |    26 +-
 plugins/assert_tools.py                        |   138 +
 plugins/dtestcollect.py                        |    92 -
 plugins/dtestconfig.py                         |    42 -
 plugins/dtesttag.py                            |    45 -
 plugins/dtestxunit.py                          |   348 -
 prepared_statements_test.py                    |    15 +-
 pushed_notifications_test.py                   |   174 +-
 putget_test.py                                 |    52 +-
 pytest.ini                                     |     5 +
 range_ghost_test.py                            |     9 +-
 read_failures_test.py                          |    37 +-
 read_repair_test.py                            |   226 +-
 rebuild_test.py                                |   116 +-
 repair_tests/deprecated_repair_test.py         |   127 +-
 repair_tests/incremental_repair_test.py        |   404 +-
 repair_tests/preview_repair_test.py            |    32 +-
 repair_tests/repair_test.py                    |   380 +-
 replace_address_test.py                        |   216 +-
 replication_test.py                            |   122 +-
 requirements.txt                               |    19 +-
 run_dtests.py                                  |   486 +-
 schema_metadata_test.py                        |   457 +-
 schema_test.py                                 |    30 +-
 scrub_test.py                                  |    85 +-
 secondary_indexes_test.py                      |   282 +-
 snapshot_test.py                               |   146 +-
 snitch_test.py                                 |    41 +-
 sslnodetonode_test.py                          |    49 +-
 sstable_generation_loading_test.py             |   114 +-
 sstablesplit_test.py                           |    48 +-
 sstableutil_test.py                            |    63 +-
 stress_tool_test.py                            |    13 +-
 super_column_cache_test.py                     |    66 +-
 super_counter_test.py                          |    39 +-
 system_keyspaces_test.py                       |     8 +-
 thrift_bindings/thrift010/Cassandra-remote     |   425 +
 thrift_bindings/thrift010/Cassandra.py         | 10961 ++++++++++++++++++
 thrift_bindings/thrift010/__init__.py          |     1 +
 thrift_bindings/thrift010/constants.py         |    13 +
 thrift_bindings/thrift010/ttypes.py            |  4218 +++++++
 thrift_bindings/v22/Cassandra-remote           |   396 -
 thrift_bindings/v22/Cassandra.py               | 10506 -----------------
 thrift_bindings/v22/__init__.py                |     1 -
 thrift_bindings/v22/constants.py               |    14 -
 thrift_bindings/v22/ttypes.py                  |  4219 -------
 thrift_hsha_test.py                            |    51 +-
 thrift_test.py                                 |  2649 +++++
 thrift_tests.py                                |  2679 -----
 token_generator_test.py                        |    53 +-
 tools/assertions.py                            |   100 +-
 tools/context.py                               |     4 +-
 tools/data.py                                  |    42 +-
 tools/datahelp.py                              |    20 +-
 tools/decorators.py                            |   106 -
 tools/files.py                                 |     5 +-
 tools/git.py                                   |    14 +-
 tools/hacks.py                                 |    30 +-
 tools/intervention.py                          |     8 +-
 tools/jmxutils.py                              |    32 +-
 tools/metadata_wrapper.py                      |     4 +-
 tools/misc.py                                  |    56 +-
 tools/paging.py                                |     7 +-
 topology_test.py                               |   136 +-
 ttl_test.py                                    |    87 +-
 udtencoding_test.py                            |     5 +-
 upgrade_crc_check_chance_test.py               |    55 +-
 upgrade_internal_auth_test.py                  |    64 +-
 upgrade_tests/bootstrap_upgrade_test.py        |    15 +-
 upgrade_tests/compatibility_flag_test.py       |    42 +-
 upgrade_tests/cql_tests.py                     |   841 +-
 upgrade_tests/paging_test.py                   |   456 +-
 upgrade_tests/regression_test.py               |    42 +-
 upgrade_tests/repair_test.py                   |    15 +-
 upgrade_tests/storage_engine_upgrade_test.py   |    64 +-
 upgrade_tests/thrift_upgrade_test.py           |   130 +-
 upgrade_tests/upgrade_base.py                  |    74 +-
 upgrade_tests/upgrade_compact_storage.py       |    84 +-
 upgrade_tests/upgrade_manifest.py              |    16 +-
 upgrade_tests/upgrade_schema_agreement_test.py |    68 +-
 upgrade_tests/upgrade_supercolumns_test.py     |    67 +-
 upgrade_tests/upgrade_through_versions_test.py |   179 +-
 user_functions_test.py                         |    47 +-
 user_types_test.py                             |    87 +-
 wide_rows_test.py                              |    19 +-
 write_failures_test.py                         |    49 +-
 143 files changed, 28963 insertions(+), 28116 deletions(-)
----------------------------------------------------------------------
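
Two structural moves stand out in this diffstat: the nose-era plugins (dtestcollect, dtestconfig, dtesttag, dtestxunit) and tools/decorators.py are deleted, while a 489-line conftest.py and a small pytest.ini are added. One job a conftest typically inherits in a migration like this is interpreting markers such as the @since(...) annotations seen throughout the diffs. A minimal hypothetical sketch of that mechanism follows; the real conftest.py is not shown in this excerpt, and the hard-coded version below is invented:

```python
# conftest.py -- hypothetical sketch only, not the file added by this commit
import pytest
from distutils.version import LooseVersion

# In the real suite the running Cassandra version comes from the cluster
# fixtures/configuration; it is hard-coded here purely for illustration.
RUNNING_VERSION = LooseVersion('3.11')


def pytest_collection_modifyitems(config, items):
    """Skip any test whose @since(...) marker requires a newer Cassandra."""
    for item in items:
        marker = item.get_closest_marker('since')
        if marker and LooseVersion(marker.args[0]) > RUNNING_VERSION:
            item.add_marker(pytest.mark.skip(
                reason='requires Cassandra >= {}'.format(marker.args[0])))
```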


http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 0b228c0..6131160 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -46,7 +46,7 @@ Description       | Brief description of the test
 
 
 ```python
-def example_test(self):
+def test_example(self):
 """
 Demonstrates the expected syntax for a test plan. Parsed by Doxygen.
 @jira_ticket CASSANDRA-0000

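The one-line rename in the CONTRIBUTING.md hunk above is load-bearing: pytest's default collection rules only pick up files matching test_*.py or *_test.py, classes named Test*, and functions or methods named test_*, so a method named example_test would be silently ignored rather than run. A tiny sketch of the difference (hypothetical class, not from the repository):

```python
class TestPlanExample:
    def example_test(self):   # NOT collected under pytest's default naming rules
        assert True

    def test_example(self):   # collected and run by pytest
        assert True
```
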
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/INSTALL.md
----------------------------------------------------------------------
diff --git a/INSTALL.md b/INSTALL.md
deleted file mode 100644
index b37bf60..0000000
--- a/INSTALL.md
+++ /dev/null
@@ -1,134 +0,0 @@
-Setup instructions for cassandra-dtest
-======================================
-
-These are instructions for setting up dtests on a fresh install of Ubuntu Linux 12.04 LTS. If you use something else, you'll need to adapt these for your particular situation (or better yet, append to this file with your platform's requirements and send a pull request.)
-
-## Prerequisite Software:
-* Update software repositories:
-
-        sudo apt-get update
-
-* python
-
-        sudo apt-get install python python-setuptools python-dev python-pip
-
-* git
-
-        sudo apt-get install git
-
-* windows
-
-        python: https://www.python.org/downloads/
-        git:https://msysgit.github.io/
-        gnuwin32: http://gnuwin32.sourceforge.net/
-        apache ant: https://ant.apache.org/bindownload.cgi
-
-## Install Oracle Java 8:
-* java and misc tools:
-
-        sudo apt-get install software-properties-common
-        sudo add-apt-repository ppa:webupd8team/java
-        sudo apt-get update
-        sudo apt-get install oracle-java8-installer
-
-        Windows: http://www.oracle.com/technetwork/java/javase/downloads/index.html
-
-* Ensure that java is a HotSpot 1.8.x version:
-
-        # java -version
-        java version "1.8.0_73"
-        Java(TM) SE Runtime Environment (build 1.8.0_73)
-        Java HotSpot(TM) 64-Bit Server VM (build 24.0-b56, mixed mode)
-
-* install ant
-
-        sudo apt-get install ant
-
-## Create a git directory for holding several projects we'll use:
-
-        mkdir -p ~/git/cstar
-
-## Install companion tools / libraries:
-It's best to download the git source tree for these libraries as you
-will often need to modify them in some fashion at some later point:
-
-* ccm:
-
-        cd ~/git/cstar
-        git clone git://github.com/pcmanus/ccm.git
-        sudo apt-get install libyaml-dev
-        sudo pip install -e ccm
-        sudo pip install pyyaml
-
-* python-driver
-
-        cd ~/git/cstar
-        Cassandra 2.x:
-        sudo pip install cassandra-driver
-        Cassandra 3.x (requires latest python-driver):
-        sudo pip install git+git://github.com/datastax/python-driver@cassandra-test  # install dedicated test branch for new Cassandra features
-        sudo pip install --pre cassandra-driver  # fallback driver for new features
-        For more instructions on how to install the python-driver,
-        see http://datastax.github.io/python-driver/installation.html
-
-* cql
-
-        sudo pip install cql
-
-* cassandra-dtest
-
-        cd ~/git/cstar
-        git clone git://github.com/apache/cassandra-dtest.git
-
-* nose
-
-        sudo apt-get install python-nose
-
-* flaky
-
-		sudo pip install flaky
-
-* cassandra
-
-        cd ~/git/cstar
-        git clone http://git-wip-us.apache.org/repos/asf/cassandra.git
-        cd cassandra
-        ant clean jar
-
- Optionally, you can self-check cassandra at this point by running
- it's unit tests:
-
-        ant test
-
- Note: you may need to install ant-optional to get junit working:
-
-        sudo apt-get install ant-optional
-
-## Setup and run dtests
-* Install current python dependencies:
-
-        sudo pip install decorator
-
-* Set CASSANDRA_DIR environment variable.
-  Set the variable in your ~/.bashrc file once so that you don't have to keep setting it everytime you run dtests:
-
-        export CASSANDRA_DIR=~/git/cstar/cassandra
-
-* Run the full dtest suite (takes multiple hours, depending on your hardware):
-
-         cd ~/git/cstar/cassandra-dtest
-         nosetests
-
-* Run the full dtest suite, retrying tests decorated with `flaky` (see [the `flaky` plugin](https://github.com/box/flaky) for more documentation):
-
-         cd ~/git/cstar/cassandra-dtest
-         nosetests --with-flaky
-
-* Run a single dtest, printing debug info, stopping at the first error encountered (if any):
-
-         cd ~/git/cstar/cassandra-dtest
-         PRINT_DEBUG=true nosetests -x -s -v putget_test.py
-
-* Some tests will not run with vnodes enabled (you'll see a "SKIP: Test disabled for vnodes" message in that case). Use the provided runner script instead:
-
-        ./run_dtests.py --vnodes false --nose-options "-x -s -v" topology_test.py:TestTopology.movement_test

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/README.md
----------------------------------------------------------------------
diff --git a/README.md b/README.md
index be595bd..7519925 100644
--- a/README.md
+++ b/README.md
@@ -1,41 +1,82 @@
-Cassandra Distributed Tests
-===========================
+Cassandra Distributed Tests (DTests)
+====================================
 
-Tests for [Apache Cassandra](http://apache.cassandra.org) clusters.
+Cassandra Distributed Tests (better known as "DTests") are a set of Python-based
+tests for [Apache Cassandra](http://apache.cassandra.org) clusters. DTests aim to
+test functionality that requires multiple Cassandra instances. Functionality that
+can be tested in isolation should ideally be covered by a unit test (unit tests
+live in the Cassandra repository itself).
 
-Prerequisites
+Setup and Prerequisites
 ------------
 
-An up to date copy of ccm should be installed for starting and stopping Cassandra.
-The tests are run using nosetests.
-These tests require the datastax python driver.
-A few tests still require the deprecated python CQL over thrift driver.
+Some environmental setup is required before you can start running DTests.
+
+### Native Dependencies
+DTests requires the following native dependencies:
+ * Python 3
+ * PIP for Python 3 
+ * libev
+ * git
+ * JDK 8 (Java)
+ 
+#### Linux
+1. ``apt-get install git-core python3 python3-pip python3-dev libev4 libev-dev``
+2. (Optional - solves warning: "jemalloc shared library could not be preloaded to speed up memory allocations"): 
+``apt-get install -y --no-install-recommends libjemalloc1``
+
+#### Mac
+On Mac, the easiest path is to install the latest [Xcode and Command Line Utilities](https://developer.apple.com) to 
+bootstrap your development environment, and then use [Homebrew](https://brew.sh).
+
+1. (Optional) Make sure brew is in a good state on your system ``brew doctor``
+2. ``brew install python3 libev``
+
+### Python Dependencies
+There are multiple external Python dependencies required to run DTests. 
+The current Python dependency list is maintained in a file named
+[requirements.txt](https://github.com/apache/cassandra-dtest/blob/master/requirements.txt) 
+in the root of the cassandra-dtest repository.
+
+The easiest way to install these dependencies is with pip and virtualenv; the
+steps below are also collected into a single copy-paste snippet after the list.
+
+**Note**: While virtualenv isn't strictly required, using it is almost always the quickest
+path to success, as it provides a common base setup across various configurations.
+
+1. Install virtualenv: ``pip install virtualenv``
+2. Create a new virtualenv: ``virtualenv --python=python3 --no-site-packages ~/dtest``
+3. Switch/Activate the new virtualenv: ``source ~/dtest/bin/activate``
+4. Install remaining DTest Python dependencies: ``pip install -r /path/to/cassandra-dtest/requirements.txt``
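+
+Taken together (assuming the repository is cloned at ``/path/to/cassandra-dtest``, as above):
+
+```bash
+pip install virtualenv
+virtualenv --python=python3 --no-site-packages ~/dtest
+source ~/dtest/bin/activate
+pip install -r /path/to/cassandra-dtest/requirements.txt
+```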
 
- * [ccm](https://github.com/pcmanus/ccm)
- * [nosetests](http://readthedocs.org/docs/nose/en/latest/)
- * [Python Driver](http://datastax.github.io/python-driver/installation.html)
- * [CQL over Thrift Driver](http://code.google.com/a/apache-extras.org/p/cassandra-dbapi2/)
 
 Usage
 -----
 
-The tests are run by nosetests. The only thing the framework needs to know is
-the location of the (compiled) sources for Cassandra. There are two options:
+The tests are executed by the pytest framework. For convenience, a wrapper ``run_dtests.py``
+is included to make starting the dtests with sane defaults as easy as possible. Most users,
+however, will find that invoking the tests directly using ``pytest`` ultimately works best
+and provides the most flexibility.
+
+Pytest's [Usage and Invocations](https://docs.pytest.org/en/latest/usage.html) document is a good place to start for basic invocation options.
+
+At minimum, the framework needs to know the location of the compiled
+(hint: ``ant clean jar``) sources for Cassandra. There are two options:
 
 Use existing sources:
 
-    CASSANDRA_DIR=~/path/to/cassandra nosetests
+    pytest --cassandra-dir=~/path/to/cassandra
 
 Use ccm ability to download/compile released sources from archives.apache.org:
 
-    CASSANDRA_VERSION=1.0.0 nosetests
+    pytest --cassandra-version=1.0.0
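+
+For example, to run a single test module verbosely, stopping at the first failure
+(standard pytest flags; the module name is just an illustration):
+
+```bash
+pytest --cassandra-dir=~/path/to/cassandra -x -v putget_test.py
+```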
 
 A convenient option if tests are regularly run against the same existing
-directory is to set a `default_dir` in `~/.cassandra-dtest`. Create the file and
-set it to something like:
+directory is to set a `cassandra_dir` in `~/path/to/cassandra-dtest/pytest.ini`:
 
-    [main]
-    default_dir=~/path/to/cassandra
+    [pytest]
+    cassandra_dir=~/path/to/cassandra
 
 The tests will use this directory by default, avoiding the need for any
 environment variable (that still will have precedence if given though).
@@ -43,6 +84,11 @@ environment variable (that still will have precedence if given though).
 Existing tests are probably the best place to start to look at how to write
 tests.
 
+The included ``run_dtests.py`` script is simply a wrapper that starts the dtests
+with sane defaults. If you just want to run the tests and nothing more, it is the
+easiest place to start; however, anyone doing active development and testing will
+likely find invoking pytest directly to be the better option.
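+
+For example (the wrapper defines its own flags, so check them before relying on any):
+
+```bash
+./run_dtests.py --help
+```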
+
 Each test spawns a new fresh cluster and tears it down after the test. If a
 test fails, the logs for the node are saved in a `logs/<timestamp>` directory
 for analysis (it's not perfect but has been good enough so far, I'm open to
@@ -64,3 +110,141 @@ Writing Tests
 - If you're using JMX via [the `tools.jmxutils` module](tools/jmxutils.py), make sure to call `remove_perf_disable_shared_mem` on the node or nodes you want to query with JMX _before starting the nodes_. `remove_perf_disable_shared_mem` disables a JVM option that's incompatible with JMX (see [this JMX ticket](https://github.com/rhuss/jolokia/issues/198)). It works by performing a string replacement in the node's Cassandra startup script, so changes will only propagate to the node at startup time.
 
 If you'd like to know what to expect during a code review, please see the included [CONTRIBUTING file](CONTRIBUTING.md).
+
+Debugging Tests
+-------------
+Below are some general tips for debugging dtest/pytest tests.
+
+#### pytest.set_trace()
+If there is an unexpected value being asserted on and you'd like to inspect the state of all the test's variables just before a particular assert, add ``pytest.set_trace()`` right before the problematic code. The next time you execute the test, pytest will drop you into an interactive Python debugger (pdb) when that line is reached. From there you can use standard pdb commands to inspect variables and step through the code, as in the sketch below.
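+
+A minimal sketch (the test body and names are hypothetical, not taken from the dtest suite):
+
+```python
+import pytest
+
+
+def test_example_debugging():
+    rows = [1, 2, 3]       # imagine this came from a CQL query
+    pytest.set_trace()     # execution pauses here and drops into pdb
+    assert len(rows) == 4  # inspect `rows` in pdb before this assert fails
+```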
+
+#### Hung tests/hung pytest framework
+Debugging hung tests can be very difficult, but thanks to improvements in Python 3 it's now pretty painless to get a thread dump of every Python thread currently running in the pytest process.
+
+```python
+import faulthandler
+faulthandler.enable()
+```
+Adding the above code installs a signal handler into your process. When the process receives a *SIGABRT* signal, Python prints a thread dump for every running thread in the process. DTests installs this by default with the install_debugging_signal_handler fixture.
+
+The following is an example of what you might see if you send a *SIGABRT* signal to the pytest process while in a hung state during the test teardown phase after the successful completion of the actual dtest.
+
+```bash
+(env) cassandra-dtest vcooluser$ kill -SIGABRT 24142
+
+Fatal Python error: Aborted
+
+Thread 0x000070000f739000 (most recent call first):
+  File "/usr/local/Cellar/python3/3.6.3/Frameworks/Python.framework/Versions/3.6/lib/python3.6/threading.py", line 295 in wait
+  File "/usr/local/Cellar/python3/3.6.3/Frameworks/Python.framework/Versions/3.6/lib/python3.6/threading.py", line 551 in wait
+  File "/Users/mkjellman/src/cassandra-dtest/tools/data.py", line 31 in query_c1c2
+  File "/Users/mkjellman/src/cassandra-dtest/bootstrap_test.py", line 91 in <lambda>
+  File "/Users/mkjellman/src/cassandra-dtest/dtest.py", line 245 in run
+  File "/usr/local/Cellar/python3/3.6.3/Frameworks/Python.framework/Versions/3.6/lib/python3.6/threading.py", line 916 in _bootstrap_inner
+  File "/usr/local/Cellar/python3/3.6.3/Frameworks/Python.framework/Versions/3.6/lib/python3.6/threading.py", line 884 in _bootstrap
+
+Thread 0x000070000e32d000 (most recent call first):
+  File "/usr/local/Cellar/python3/3.6.3/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncore.py", line 183 in poll2
+  File "/usr/local/Cellar/python3/3.6.3/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncore.py", line 207 in loop
+  File "/Users/mkjellman/env3/src/cassandra-driver/cassandra/io/asyncorereactor.py", line 119 in loop
+  File "/Users/mkjellman/env3/src/cassandra-driver/cassandra/io/asyncorereactor.py", line 258 in _run_loop
+  File "/usr/local/Cellar/python3/3.6.3/Frameworks/Python.framework/Versions/3.6/lib/python3.6/threading.py", line 864 in run
+  File "/usr/local/Cellar/python3/3.6.3/Frameworks/Python.framework/Versions/3.6/lib/python3.6/threading.py", line 916 in _bootstrap_inner
+  File "/usr/local/Cellar/python3/3.6.3/Frameworks/Python.framework/Versions/3.6/lib/python3.6/threading.py", line 884 in _bootstrap
+
+Current thread 0x00007fffa00dd340 (most recent call first):
+  File "/usr/local/Cellar/python3/3.6.3/Frameworks/Python.framework/Versions/3.6/lib/python3.6/threading.py", line 1072 in _wait_for_tstate_lock
+  File "/usr/local/Cellar/python3/3.6.3/Frameworks/Python.framework/Versions/3.6/lib/python3.6/threading.py", line 1056 in join
+  File "/Users/mkjellman/src/cassandra-dtest/dtest.py", line 253 in stop
+  File "/Users/mkjellman/src/cassandra-dtest/dtest.py", line 580 in tearDown
+  File "/usr/local/Cellar/python3/3.6.3/Frameworks/Python.framework/Versions/3.6/lib/python3.6/unittest/case.py", line 608 in run
+  File "/usr/local/Cellar/python3/3.6.3/Frameworks/Python.framework/Versions/3.6/lib/python3.6/unittest/case.py", line 653 in __call__
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/_pytest/unittest.py", line 174 in runtest
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/_pytest/runner.py", line 107 in pytest_runtest_call
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/pluggy/callers.py", line 180 in _multicall
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/pluggy/__init__.py", line 216 in <lambda>
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/pluggy/__init__.py", line 222 in _hookexec
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/pluggy/__init__.py", line 617 in __call__
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/flaky/flaky_pytest_plugin.py", line 273 in <lambda>
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/_pytest/runner.py", line 191 in __init__
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/flaky/flaky_pytest_plugin.py", line 274 in call_runtest_hook
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/flaky/flaky_pytest_plugin.py", line 118 in call_and_report
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/_pytest/runner.py", line 77 in runtestprotocol
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/_pytest/runner.py", line 63 in pytest_runtest_protocol
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/flaky/flaky_pytest_plugin.py", line 81 in pytest_runtest_protocol
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/pluggy/callers.py", line 180 in _multicall
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/pluggy/__init__.py", line 216 in <lambda>
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/pluggy/__init__.py", line 222 in _hookexec
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/pluggy/__init__.py", line 617 in __call__
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/_pytest/main.py", line 164 in pytest_runtestloop
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/pluggy/callers.py", line 180 in _multicall
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/pluggy/__init__.py", line 216 in <lambda>
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/pluggy/__init__.py", line 222 in _hookexec
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/pluggy/__init__.py", line 617 in __call__
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/_pytest/main.py", line 141 in _main
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/_pytest/main.py", line 103 in wrap_session
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/_pytest/main.py", line 134 in pytest_cmdline_main
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/pluggy/callers.py", line 180 in _multicall
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/pluggy/__init__.py", line 216 in <lambda>
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/pluggy/__init__.py", line 222 in _hookexec
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/pluggy/__init__.py", line 617 in __call__
+  File "/Users/mkjellman/env3/lib/python3.6/site-packages/_pytest/config.py", line 59 in main
+  File "/Users/mkjellman/env3/bin/pytest", line 11 in <module>
+Abort trap: 6
+```
+
+#### Debugging Issues with Fixtures and Test Setup/Teardown
+pytest can appear to be doing "magic" more often than not. One place where normal code inspection alone may not be enough is determining which fixtures will run for a given test, and in what order. pytest provides a ``--setup-plan`` command line argument; when invoked with it, pytest prints an execution plan listing every fixture and test that an actual run would invoke. Below is an example of the execution plan pytest generates for the dtest *auth_test.py::TestAuthRoles::test_create_drop_role*:
+
+```bash
+(env3) Michaels-MacBook-Pro:cassandra-dtest mkjellman$ pytest --cassandra-dir=/Users/mkjellman/src/mkjellman-oss-github-cassandra-trunk auth_test.py::TestAuthRoles::test_create_drop_role --setup-plan
+====================================================================== test session starts ======================================================================
+platform darwin -- Python 3.6.3, pytest-3.3.0, py-1.5.2, pluggy-0.6.0
+rootdir: /Users/mkjellman/src/cassandra-dtest, inifile: pytest.ini
+plugins: timeout-1.2.1, raisesregexp-2.1, nose2pytest-1.0.8, flaky-3.4.0
+collected 1 item                                                                                                                                                
+
+auth_test.py 
+SETUP    S install_debugging_signal_handler
+    SETUP    C fixture_logging_setup
+      SETUP    F fixture_dtest_setup_overrides
+      SETUP    F fixture_log_test_name_and_date
+      SETUP    F fixture_maybe_skip_tests_requiring_novnodes
+      SETUP    F parse_dtest_config
+      SETUP    F fixture_dtest_setup (fixtures used: fixture_dtest_setup_overrides, fixture_logging_setup, parse_dtest_config)
+      SETUP    F fixture_since (fixtures used: fixture_dtest_setup)
+      SETUP    F fixture_dtest_config (fixtures used: fixture_logging_setup)
+      SETUP    F set_dtest_setup_on_function (fixtures used: fixture_dtest_config, fixture_dtest_setup)
+        auth_test.py::TestAuthRoles::()::test_create_drop_role (fixtures used: fixture_dtest_config, fixture_dtest_setup, fixture_dtest_setup_overrides, fixture_log_test_name_and_date, fixture_logging_setup, fixture_maybe_skip_tests_requiring_novnodes, fixture_since, install_debugging_signal_handler, parse_dtest_config, set_dtest_setup_on_function)
+      TEARDOWN F set_dtest_setup_on_function
+      TEARDOWN F fixture_dtest_config
+      TEARDOWN F fixture_since
+      TEARDOWN F fixture_dtest_setup
+      TEARDOWN F parse_dtest_config
+      TEARDOWN F fixture_maybe_skip_tests_requiring_novnodes
+      TEARDOWN F fixture_log_test_name_and_date
+      TEARDOWN F fixture_dtest_setup_overrides
+    TEARDOWN C fixture_logging_setup
+TEARDOWN S install_debugging_signal_handler
+===Flaky Test Report===
+
+
+===End Flaky Test Report===
+
+====================================================================== 0 tests deselected =======================================================================
+================================================================= no tests ran in 0.12 seconds ==================================================================
+```
+
+#### Instances Failing to Start (Unclean Test Teardown)
+It is easy to get into a state (especially while writing new tests or debugging problematic ones) where pytest/dtest fails to fully tear down all the local C* instances it started. You can use this handy one-liner to kill all C* instances in one go:
+
+```bash
+ps aux | grep -ie CassandraDaemon | grep java | awk '{print $2}' | xargs kill
+```
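+
+If your system ships ``pkill`` (most Linux distributions and macOS do), the following should be equivalent, since it matches against the full java command line:
+
+```bash
+pkill -f CassandraDaemon
+```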
+
+Links
+-------
+ * [ccm](https://github.com/pcmanus/ccm)
+ * [pytest](https://docs.pytest.org/)
+ * [Python Driver](http://datastax.github.io/python-driver/installation.html)
+ * [CQL over Thrift Driver](http://code.google.com/a/apache-extras.org/p/cassandra-dbapi2/)

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/auth_test.py
----------------------------------------------------------------------
diff --git a/auth_test.py b/auth_test.py
index 2749209..34f7212 100644
--- a/auth_test.py
+++ b/auth_test.py
@@ -2,34 +2,39 @@ import time
 from collections import namedtuple
 from datetime import datetime, timedelta
 from distutils.version import LooseVersion
-
-from nose.tools import assert_regexp_matches
+import re
+import pytest
+import logging
 
 from cassandra import AuthenticationFailed, InvalidRequest, Unauthorized
 from cassandra.cluster import NoHostAvailable
 from cassandra.protocol import ServerError, SyntaxException
 
-from dtest import CASSANDRA_VERSION_FROM_BUILD, Tester, debug
+from dtest_setup_overrides import DTestSetupOverrides
+from dtest import CASSANDRA_VERSION_FROM_BUILD, Tester
 from tools.assertions import (assert_all, assert_exception, assert_invalid,
                               assert_length_equal, assert_one,
                               assert_unauthorized)
-from tools.decorators import since
 from tools.jmxutils import (JolokiaAgent, make_mbean,
                             remove_perf_disable_shared_mem)
 from tools.metadata_wrapper import UpdatingKeyspaceMetadataWrapper
 from tools.misc import ImmutableMapping
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 class TestAuth(Tester):
 
-    ignore_log_patterns = (
-        # This one occurs if we do a non-rolling upgrade, the node
-        # it's trying to send the migration to hasn't started yet,
-        # and when it does, it gets replayed and everything is fine.
-        r'Can\'t send migration request: node.*is down',
-    )
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        fixture_dtest_setup.ignore_log_patterns = (
+            # This one occurs if we do a non-rolling upgrade, the node
+            # it's trying to send the migration to hasn't started yet,
+            # and when it does, it gets replayed and everything is fine.
+            r'Can\'t send migration request: node.*is down',
+        )
 
-    def system_auth_ks_is_alterable_test(self):
+    def test_system_auth_ks_is_alterable(self):
         """
         * Launch a three node cluster
         * Verify the default RF of system_auth is 1
@@ -41,7 +46,7 @@ class TestAuth(Tester):
         @jira_ticket CASSANDRA-10655
         """
         self.prepare(nodes=3)
-        debug("nodes started")
+        logger.debug("nodes started")
 
         session = self.get_session(user='cassandra', password='cassandra')
         auth_metadata = UpdatingKeyspaceMetadataWrapper(
@@ -49,39 +54,39 @@ class TestAuth(Tester):
             ks_name='system_auth',
             max_schema_agreement_wait=30  # 3x the default of 10
         )
-        self.assertEquals(1, auth_metadata.replication_strategy.replication_factor)
+        assert 1 == auth_metadata.replication_strategy.replication_factor
 
         session.execute("""
             ALTER KEYSPACE system_auth
                 WITH replication = {'class':'SimpleStrategy', 'replication_factor':3};
         """)
 
-        self.assertEquals(3, auth_metadata.replication_strategy.replication_factor)
+        assert 3 == auth_metadata.replication_strategy.replication_factor
 
         # Run repair to workaround read repair issues caused by CASSANDRA-10655
-        debug("Repairing before altering RF")
+        logger.debug("Repairing before altering RF")
         self.cluster.repair()
 
-        debug("Shutting down client session")
+        logger.debug("Shutting down client session")
         session.shutdown()
 
         # make sure schema change is persistent
-        debug("Stopping cluster..")
+        logger.debug("Stopping cluster..")
         self.cluster.stop()
-        debug("Restarting cluster..")
+        logger.debug("Restarting cluster..")
         self.cluster.start(wait_other_notice=True)
 
         # check each node directly
         for i in range(3):
-            debug('Checking node: {i}'.format(i=i))
+            logger.debug('Checking node: {i}'.format(i=i))
             node = self.cluster.nodelist()[i]
             exclusive_auth_metadata = UpdatingKeyspaceMetadataWrapper(
                 cluster=self.patient_exclusive_cql_connection(node, user='cassandra', password='cassandra').cluster,
                 ks_name='system_auth'
             )
-            self.assertEquals(3, exclusive_auth_metadata.replication_strategy.replication_factor)
+            assert 3 == exclusive_auth_metadata.replication_strategy.replication_factor
 
-    def login_test(self):
+    def test_login(self):
         """
         * Launch a one node cluster
         * Connect as the default user/password
@@ -94,15 +99,15 @@ class TestAuth(Tester):
         try:
             self.get_session(user='cassandra', password='badpassword')
         except NoHostAvailable as e:
-            self.assertIsInstance(e.errors.values()[0], AuthenticationFailed)
+            assert isinstance(list(e.errors.values())[0], AuthenticationFailed)
         try:
             self.get_session(user='doesntexist', password='doesntmatter')
         except NoHostAvailable as e:
-            self.assertIsInstance(e.errors.values()[0], AuthenticationFailed)
+            assert isinstance(list(e.errors.values())[0], AuthenticationFailed)
 
     # from 2.2 role creation is granted by CREATE_ROLE permissions, not superuser status
     @since('1.2', max_version='2.1.x')
-    def only_superuser_can_create_users_test(self):
+    def test_only_superuser_can_create_users(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -119,7 +124,7 @@ class TestAuth(Tester):
         assert_unauthorized(jackob, "CREATE USER james WITH PASSWORD '54321' NOSUPERUSER", 'Only superusers are allowed to perform CREATE (\[ROLE\|USER\]|USER) queries', )
 
     @since('1.2', max_version='2.1.x')
-    def password_authenticator_create_user_requires_password_test(self):
+    def test_password_authenticator_create_user_requires_password(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -132,7 +137,7 @@ class TestAuth(Tester):
         assert_invalid(session, "CREATE USER jackob NOSUPERUSER", 'PasswordAuthenticator requires PASSWORD option')
         session.execute("CREATE USER jackob WITH PASSWORD '12345' NOSUPERUSER")
 
-    def cant_create_existing_user_test(self):
+    def test_cant_create_existing_user(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -145,7 +150,7 @@ class TestAuth(Tester):
         session.execute("CREATE USER 'james@example.com' WITH PASSWORD '12345' NOSUPERUSER")
         assert_invalid(session, "CREATE USER 'james@example.com' WITH PASSWORD '12345' NOSUPERUSER", 'james@example.com already exists')
 
-    def list_users_test(self):
+    def test_list_users(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -163,30 +168,30 @@ class TestAuth(Tester):
         session.execute("CREATE USER dave WITH PASSWORD '12345' SUPERUSER")
 
         rows = list(session.execute("LIST USERS"))
-        self.assertEqual(5, len(rows))
+        assert 5 == len(rows)
         # {username: isSuperuser} dict.
         users = dict([(r[0], r[1]) for r in rows])
 
-        self.assertTrue(users['cassandra'])
-        self.assertFalse(users['alex'])
-        self.assertTrue(users['bob'])
-        self.assertFalse(users['cathy'])
-        self.assertTrue(users['dave'])
+        assert users['cassandra']
+        assert not users['alex']
+        assert users['bob']
+        assert not users['cathy']
+        assert users['dave']
 
         self.get_session(user='dave', password='12345')
         rows = list(session.execute("LIST USERS"))
-        self.assertEqual(5, len(rows))
+        assert 5 == len(rows)
         # {username: isSuperuser} dict.
         users = dict([(r[0], r[1]) for r in rows])
 
-        self.assertTrue(users['cassandra'])
-        self.assertFalse(users['alex'])
-        self.assertTrue(users['bob'])
-        self.assertFalse(users['cathy'])
-        self.assertTrue(users['dave'])
+        assert users['cassandra']
+        assert not users['alex']
+        assert users['bob']
+        assert not users['cathy']
+        assert users['dave']
 
     @since('2.2')
-    def handle_corrupt_role_data_test(self):
+    def test_handle_corrupt_role_data(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -208,14 +213,15 @@ class TestAuth(Tester):
 
         session.execute("UPDATE system_auth.roles SET is_superuser=null WHERE role='bob'")
 
-        self.ignore_log_patterns = list(self.ignore_log_patterns) + [r'Invalid metadata has been detected for role bob']
+        self.fixture_dtest_setup.ignore_log_patterns = list(self.fixture_dtest_setup.ignore_log_patterns) + [
+            r'Invalid metadata has been detected for role bob']
         assert_exception(session, "LIST USERS", "Invalid metadata has been detected for role", expected=(ServerError))
         try:
             self.get_session(user='bob', password='12345')
         except NoHostAvailable as e:
-            self.assertIsInstance(e.errors.values()[0], AuthenticationFailed)
+            assert isinstance(list(e.errors.values())[0], AuthenticationFailed)
 
-    def user_cant_drop_themselves_test(self):
+    def test_user_cant_drop_themselves(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -229,7 +235,7 @@ class TestAuth(Tester):
 
     # from 2.2 role deletion is granted by DROP_ROLE permissions, not superuser status
     @since('1.2', max_version='2.1.x')
-    def only_superusers_can_drop_users_test(self):
+    def test_only_superusers_can_drop_users(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -245,19 +251,19 @@ class TestAuth(Tester):
         cassandra.execute("CREATE USER cathy WITH PASSWORD '12345' NOSUPERUSER")
         cassandra.execute("CREATE USER dave WITH PASSWORD '12345' NOSUPERUSER")
         rows = list(cassandra.execute("LIST USERS"))
-        self.assertEqual(3, len(rows))
+        assert 3 == len(rows)
 
         cathy = self.get_session(user='cathy', password='12345')
         assert_unauthorized(cathy, 'DROP USER dave', 'Only superusers are allowed to perform DROP (\[ROLE\|USER\]|USER) queries')
 
         rows = list(cassandra.execute("LIST USERS"))
-        self.assertEqual(3, len(rows))
+        assert 3 == len(rows)
 
         cassandra.execute('DROP USER dave')
         rows = list(cassandra.execute("LIST USERS"))
-        self.assertEqual(2, len(rows))
+        assert 2 == len(rows)
 
-    def dropping_nonexistent_user_throws_exception_test(self):
+    def test_dropping_nonexistent_user_throws_exception(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -268,7 +274,7 @@ class TestAuth(Tester):
         session = self.get_session(user='cassandra', password='cassandra')
         assert_invalid(session, 'DROP USER nonexistent', "nonexistent doesn't exist")
 
-    def drop_user_case_sensitive_test(self):
+    def test_drop_user_case_sensitive(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -284,7 +290,7 @@ class TestAuth(Tester):
 
         cassandra.execute("DROP USER Test")
         rows = [x[0] for x in list(cassandra.execute("LIST USERS"))]
-        self.assertItemsEqual(rows, ['cassandra'])
+        assert rows == ['cassandra']
 
         cassandra.execute("CREATE USER test WITH PASSWORD '12345'")
 
@@ -293,9 +299,9 @@ class TestAuth(Tester):
 
         cassandra.execute("DROP USER test")
         rows = [x[0] for x in list(cassandra.execute("LIST USERS"))]
-        self.assertItemsEqual(rows, ['cassandra'])
+        assert rows == ['cassandra']
 
-    def alter_user_case_sensitive_test(self):
+    def test_alter_user_case_sensitive(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -315,7 +321,7 @@ class TestAuth(Tester):
         assert_invalid(cassandra, "ALTER USER TEST WITH PASSWORD '12345'")
         cassandra.execute("ALTER USER test WITH PASSWORD '54321'")
 
-    def regular_users_can_alter_their_passwords_only_test(self):
+    def test_regular_users_can_alter_their_passwords_only(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -336,7 +342,7 @@ class TestAuth(Tester):
         assert_unauthorized(cathy, "ALTER USER bob WITH PASSWORD 'cantchangeit'",
                             "You aren't allowed to alter this user|User cathy does not have sufficient privileges to perform the requested operation")
 
-    def users_cant_alter_their_superuser_status_test(self):
+    def test_users_cant_alter_their_superuser_status(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -347,7 +353,7 @@ class TestAuth(Tester):
         session = self.get_session(user='cassandra', password='cassandra')
         assert_unauthorized(session, "ALTER USER cassandra NOSUPERUSER", "You aren't allowed to alter your own superuser status")
 
-    def only_superuser_alters_superuser_status_test(self):
+    def test_only_superuser_alters_superuser_status(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -366,7 +372,7 @@ class TestAuth(Tester):
 
         cassandra.execute("ALTER USER cathy SUPERUSER")
 
-    def altering_nonexistent_user_throws_exception_test(self):
+    def test_altering_nonexistent_user_throws_exception(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -377,7 +383,7 @@ class TestAuth(Tester):
         session = self.get_session(user='cassandra', password='cassandra')
         assert_invalid(session, "ALTER USER nonexistent WITH PASSWORD 'doesn''tmatter'", "nonexistent doesn't exist")
 
-    def conditional_create_drop_user_test(self):
+    def test_conditional_create_drop_user(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -404,7 +410,7 @@ class TestAuth(Tester):
         session.execute("DROP USER IF EXISTS aleksey")
         assert_one(session, "LIST USERS", ['cassandra', True])
 
-    def create_ks_auth_test(self):
+    def test_create_ks_auth(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -427,7 +433,7 @@ class TestAuth(Tester):
         cassandra.execute("GRANT CREATE ON ALL KEYSPACES TO cathy")
         cathy.execute("""CREATE KEYSPACE ks WITH replication = {'class':'SimpleStrategy', 'replication_factor':1}""")
 
-    def create_cf_auth_test(self):
+    def test_create_cf_auth(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -450,7 +456,7 @@ class TestAuth(Tester):
         cassandra.execute("GRANT CREATE ON KEYSPACE ks TO cathy")
         cathy.execute("CREATE TABLE ks.cf (id int primary key)")
 
-    def alter_ks_auth_test(self):
+    def test_alter_ks_auth(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -474,7 +480,7 @@ class TestAuth(Tester):
         cassandra.execute("GRANT ALTER ON KEYSPACE ks TO cathy")
         cathy.execute("ALTER KEYSPACE ks WITH replication = {'class':'SimpleStrategy', 'replication_factor':2}")
 
-    def alter_cf_auth_test(self):
+    def test_alter_cf_auth(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -515,7 +521,7 @@ class TestAuth(Tester):
         cathy.execute("DROP INDEX cf_val_idx")
 
     @since('3.0')
-    def materialized_views_auth_test(self):
+    def test_materialized_views_auth(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -562,7 +568,7 @@ class TestAuth(Tester):
         cassandra.execute("GRANT ALTER ON ks.cf TO cathy")
         cathy.execute("DROP MATERIALIZED VIEW mv1")
 
-    def drop_ks_auth_test(self):
+    def test_drop_ks_auth(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -584,7 +590,7 @@ class TestAuth(Tester):
         cassandra.execute("GRANT DROP ON KEYSPACE ks TO cathy")
         cathy.execute("DROP KEYSPACE ks")
 
-    def drop_cf_auth_test(self):
+    def test_drop_cf_auth(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -607,7 +613,7 @@ class TestAuth(Tester):
         cassandra.execute("GRANT DROP ON ks.cf TO cathy")
         cathy.execute("DROP TABLE ks.cf")
 
-    def modify_and_select_auth_test(self):
+    def test_modify_and_select_auth(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -631,7 +637,7 @@ class TestAuth(Tester):
 
         cassandra.execute("GRANT SELECT ON ks.cf TO cathy")
         rows = list(cathy.execute("SELECT * FROM ks.cf"))
-        self.assertEquals(0, len(rows))
+        assert 0 == len(rows)
 
         assert_unauthorized(cathy, "INSERT INTO ks.cf (id, val) VALUES (0, 0)", "User cathy has no MODIFY permission on <table ks.cf> or any of its parents")
 
@@ -645,17 +651,17 @@ class TestAuth(Tester):
         cathy.execute("INSERT INTO ks.cf (id, val) VALUES (0, 0)")
         cathy.execute("UPDATE ks.cf SET val = 1 WHERE id = 1")
         rows = list(cathy.execute("SELECT * FROM ks.cf"))
-        self.assertEquals(2, len(rows))
+        assert 2 == len(rows)
 
         cathy.execute("DELETE FROM ks.cf WHERE id = 1")
         rows = list(cathy.execute("SELECT * FROM ks.cf"))
-        self.assertEquals(1, len(rows))
+        assert 1 == len(rows)
 
         rows = list(cathy.execute("TRUNCATE ks.cf"))
-        self.assertItemsEqual(rows, [])
+        assert rows == []
 
     @since('2.2')
-    def grant_revoke_without_ks_specified_test(self):
+    def test_grant_revoke_without_ks_specified(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -687,7 +693,7 @@ class TestAuth(Tester):
         cathy.execute("GRANT SELECT ON cf TO bob")
         bob.execute("SELECT * FROM ks.cf")
 
-    def grant_revoke_auth_test(self):
+    def test_grant_revoke_auth(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -725,7 +731,7 @@ class TestAuth(Tester):
         bob = self.get_session(user='bob', password='12345')
         bob.execute("SELECT * FROM ks.cf")
 
-    def grant_revoke_nonexistent_user_or_ks_test(self):
+    def test_grant_revoke_nonexistent_user_or_ks(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -747,7 +753,7 @@ class TestAuth(Tester):
 
         assert_invalid(cassandra, "REVOKE ALL ON KEYSPACE ks FROM nonexistent", "(User|Role) nonexistent doesn't exist")
 
-    def grant_revoke_cleanup_test(self):
+    def test_grant_revoke_cleanup(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -771,7 +777,7 @@ class TestAuth(Tester):
         cathy = self.get_session(user='cathy', password='12345')
         cathy.execute("INSERT INTO ks.cf (id, val) VALUES (0, 0)")
         rows = list(cathy.execute("SELECT * FROM ks.cf"))
-        self.assertEquals(1, len(rows))
+        assert 1 == len(rows)
 
         # drop and recreate the user, make sure permissions are gone
         cassandra.execute("DROP USER cathy")
@@ -785,7 +791,7 @@ class TestAuth(Tester):
         cassandra.execute("GRANT ALL ON ks.cf TO cathy")
         cathy.execute("INSERT INTO ks.cf (id, val) VALUES (0, 0)")
         rows = list(cathy.execute("SELECT * FROM ks.cf"))
-        self.assertEqual(1, len(rows))
+        assert 1 == len(rows)
 
         # drop and recreate the keyspace, make sure permissions are gone
         cassandra.execute("DROP KEYSPACE ks")
@@ -796,7 +802,7 @@ class TestAuth(Tester):
 
         assert_unauthorized(cathy, "SELECT * FROM ks.cf", "User cathy has no SELECT permission on <table ks.cf> or any of its parents")
 
-    def permissions_caching_test(self):
+    def test_permissions_caching(self):
         """
         * Launch a one node cluster, with a 2s permission cache
         * Connect as the default superuser
@@ -828,7 +834,7 @@ class TestAuth(Tester):
             if attempt > 3:
                 self.fail("Unable to verify cache expiry in 3 attempts, failing")
 
-            debug("Attempting to verify cache expiry, attempt #{i}".format(i=attempt))
+            logger.debug("Attempting to verify cache expiry, attempt #{i}".format(i=attempt))
             # grant SELECT to cathy
             cassandra.execute("GRANT SELECT ON ks.cf TO cathy")
             grant_time = datetime.now()
@@ -849,7 +855,7 @@ class TestAuth(Tester):
                         # legit failure
                         self.fail("Expecting query to raise an exception, but nothing was raised.")
                 except Unauthorized as e:
-                    assert_regexp_matches(str(e), "User cathy has no SELECT permission on <table ks.cf> or any of its parents")
+                    assert re.search("User cathy has no SELECT permission on <table ks.cf> or any of its parents", str(e))
 
         check_caching()
 
@@ -871,16 +877,16 @@ class TestAuth(Tester):
             try:
                 for c in cathys:
                     rows = list(c.execute("SELECT * FROM ks.cf"))
-                    self.assertEqual(0, len(rows))
+                    assert 0 == len(rows)
                 success = True
             except Unauthorized:
                 pass
             cnt += 1
             time.sleep(0.1)
 
-        self.assertTrue(success)
+        assert success
 
-    def list_permissions_test(self):
+    def test_list_permissions(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -956,7 +962,7 @@ class TestAuth(Tester):
 
         assert_unauthorized(bob, "LIST ALL PERMISSIONS OF cathy", "You are not authorized to view cathy's permissions")
 
-    def type_auth_test(self):
+    def test_type_auth(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -983,7 +989,7 @@ class TestAuth(Tester):
         cassandra.execute("GRANT DROP ON KEYSPACE ks TO cathy")
         cathy.execute("DROP TYPE ks.address")
 
-    def restart_node_doesnt_lose_auth_data_test(self):
+    def test_restart_node_doesnt_lose_auth_data(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -1018,13 +1024,12 @@ class TestAuth(Tester):
         philip.execute("SELECT * FROM ks.cf")
 
     @since('3.10')
-    def auth_metrics_test(self):
+    def test_auth_metrics(self):
         """
         Success and failure metrics were added to the authentication procedure
         so as to estimate the percentage of authentication attempts that failed.
         @jira_ticket CASSANDRA-10635
         """
-
         cluster = self.cluster
         config = {'authenticator': 'org.apache.cassandra.auth.PasswordAuthenticator',
                   'authorizer': 'org.apache.cassandra.auth.CassandraAuthorizer',
@@ -1042,13 +1047,13 @@ class TestAuth(Tester):
             failure = jmx.read_attribute(
                 make_mbean('metrics', type='Client', name='AuthFailure'), 'Count')
 
-            self.assertEqual(0, success)
-            self.assertEqual(0, failure)
+            assert 0 == success
+            assert 0 == failure
 
             try:
                 self.get_session(user='cassandra', password='wrong_password')
             except NoHostAvailable as e:
-                self.assertIsInstance(e.errors.values()[0], AuthenticationFailed)
+                assert isinstance(list(e.errors.values())[0], AuthenticationFailed)
 
             self.get_session(user='cassandra', password='cassandra')
 
@@ -1057,8 +1062,8 @@ class TestAuth(Tester):
             failure = jmx.read_attribute(
                 make_mbean('metrics', type='Client', name='AuthFailure'), 'Count')
 
-            self.assertGreater(success, 0)
-            self.assertGreater(failure, 0)
+            assert success > 0
+            assert failure > 0
 
     def prepare(self, nodes=1, permissions_validity=0):
         """
@@ -1073,7 +1078,7 @@ class TestAuth(Tester):
         self.cluster.populate(nodes).start()
 
         n = self.cluster.wait_for_any_log('Created default superuser', 25)
-        debug("Default role created by " + n.name)
+        logger.debug("Default role created by " + n.name)
 
     def get_session(self, node_idx=0, user=None, password=None):
         """
@@ -1097,7 +1102,7 @@ class TestAuth(Tester):
         """
         rows = session.execute(query)
         perms = [(str(r.username), str(r.resource), str(r.permission)) for r in rows]
-        self.assertEqual(sorted(expected), sorted(perms))
+        assert sorted(expected) == sorted(perms)
 
 
 def data_resource_creator_permissions(creator, resource):
@@ -1134,16 +1139,21 @@ cassandra_role = Role('cassandra', True, True, {})
 
 @since('2.2')
 class TestAuthRoles(Tester):
-    """
-    @jira_ticket CASSANDRA-7653
-    """
-    if CASSANDRA_VERSION_FROM_BUILD >= '3.0':
-        cluster_options = ImmutableMapping({'enable_user_defined_functions': 'true',
-                                            'enable_scripted_user_defined_functions': 'true'})
-    else:
-        cluster_options = ImmutableMapping({'enable_user_defined_functions': 'true'})
 
-    def create_drop_role_test(self):
+    @pytest.fixture(scope='function', autouse=True)
+    def fixture_dtest_setup_overrides(self):
+        """
+        @jira_ticket CASSANDRA-7653
+        """
+        dtest_setup_overrides = DTestSetupOverrides()
+        if CASSANDRA_VERSION_FROM_BUILD >= '3.0':
+            dtest_setup_overrides.cluster_options = ImmutableMapping({'enable_user_defined_functions': 'true',
+                                                'enable_scripted_user_defined_functions': 'true'})
+        else:
+            dtest_setup_overrides.cluster_options = ImmutableMapping({'enable_user_defined_functions': 'true'})
+        return dtest_setup_overrides
+
+    def test_create_drop_role(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -1160,7 +1170,7 @@ class TestAuthRoles(Tester):
         cassandra.execute("DROP ROLE role1")
         assert_one(cassandra, "LIST ROLES", list(cassandra_role))
 
-    def conditional_create_drop_role_test(self):
+    def test_conditional_create_drop_role(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -1181,7 +1191,7 @@ class TestAuthRoles(Tester):
         cassandra.execute("DROP ROLE IF EXISTS role1")
         assert_one(cassandra, "LIST ROLES", list(cassandra_role))
 
-    def create_drop_role_validation_test(self):
+    def test_create_drop_role_validation(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -1212,7 +1222,7 @@ class TestAuthRoles(Tester):
         cassandra.execute("DROP ROLE role1")
         assert_invalid(cassandra, "DROP ROLE role1", "role1 doesn't exist")
 
-    def role_admin_validation_test(self):
+    def test_role_admin_validation(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -1275,7 +1285,7 @@ class TestAuthRoles(Tester):
         assert_unauthorized(mike, "CREATE ROLE role3 WITH LOGIN = false",
                             "User mike does not have sufficient privileges to perform the requested operation")
 
-    def creator_of_db_resource_granted_all_permissions_test(self):
+    def test_creator_of_db_resource_granted_all_permissions(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -1317,7 +1327,7 @@ class TestAuthRoles(Tester):
                                        cassandra,
                                        "LIST ALL PERMISSIONS")
 
-    def create_and_grant_roles_with_superuser_status_test(self):
+    def test_create_and_grant_roles_with_superuser_status(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -1347,7 +1357,7 @@ class TestAuthRoles(Tester):
                                                       ['non_superuser', False, False, {}],
                                                       ['role1', False, False, {}]])
 
-    def drop_and_revoke_roles_with_superuser_status_test(self):
+    def test_drop_and_revoke_roles_with_superuser_status(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -1374,7 +1384,7 @@ class TestAuthRoles(Tester):
         mike.execute("DROP ROLE non_superuser")
         mike.execute("DROP ROLE role1")
 
-    def drop_role_removes_memberships_test(self):
+    def test_drop_role_removes_memberships(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -1406,7 +1416,7 @@ class TestAuthRoles(Tester):
         assert_one(cassandra, "LIST ROLES OF mike", list(mike_role))
         assert_all(cassandra, "LIST ROLES", [list(cassandra_role), list(mike_role), list(role2_role)])
 
-    def drop_role_revokes_permissions_granted_on_it_test(self):
+    def test_drop_role_revokes_permissions_granted_on_it(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -1431,9 +1441,9 @@ class TestAuthRoles(Tester):
 
         cassandra.execute("DROP ROLE role1")
         cassandra.execute("DROP ROLE role2")
-        self.assertItemsEqual(list(cassandra.execute("LIST ALL PERMISSIONS OF mike")), [])
+        assert list(cassandra.execute("LIST ALL PERMISSIONS OF mike")) == []
 
-    def grant_revoke_roles_test(self):
+    def test_grant_revoke_roles(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -1460,7 +1470,7 @@ class TestAuthRoles(Tester):
         cassandra.execute("REVOKE role1 FROM role2")
         assert_one(cassandra, "LIST ROLES OF role2", list(role2_role))
 
-    def grant_revoke_role_validation_test(self):
+    def test_grant_revoke_role_validation(self):
         """
         * Launch a one node cluster
         * Connect as the default superusers
@@ -1501,7 +1511,7 @@ class TestAuthRoles(Tester):
         cassandra.execute("REVOKE role1 FROM john")
         mike.execute("REVOKE role2 from john")
 
-    def list_roles_test(self):
+    def test_list_roles(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -1540,7 +1550,7 @@ class TestAuthRoles(Tester):
         cassandra.execute("GRANT DESCRIBE ON ALL ROLES TO mike")
         assert_all(mike, "LIST ROLES", [list(cassandra_role), list(mike_role), list(role1_role), list(role2_role)])
 
-    def grant_revoke_permissions_test(self):
+    def test_grant_revoke_permissions(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -1578,7 +1588,7 @@ class TestAuthRoles(Tester):
                             "INSERT INTO ks.cf (id, val) VALUES (0, 0)",
                             "mike has no MODIFY permission on <table ks.cf> or any of its parents")
 
-    def filter_granted_permissions_by_resource_type_test(self):
+    def test_filter_granted_permissions_by_resource_type(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -1694,7 +1704,7 @@ class TestAuthRoles(Tester):
                                        "LIST ALL PERMISSIONS OF mike")
         cassandra.execute("REVOKE ALL ON FUNCTION ks.agg_func(int) FROM mike")
 
-    def list_permissions_test(self):
+    def test_list_permissions(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -1760,7 +1770,7 @@ class TestAuthRoles(Tester):
                                        "LIST ALTER PERMISSION ON ROLE role1 OF role2")
         # make sure ALTER on role2 is excluded properly when OF is for another role
         cassandra.execute("CREATE ROLE role3 WITH SUPERUSER = false AND LOGIN = false")
-        self.assertItemsEqual(list(cassandra.execute("LIST ALTER PERMISSION ON ROLE role1 OF role3")), [])
+        assert list(cassandra.execute("LIST ALTER PERMISSION ON ROLE role1 OF role3")) == []
 
         # now check users can list their own permissions
         mike = self.get_session(user='mike', password='12345')
@@ -1771,7 +1781,7 @@ class TestAuthRoles(Tester):
                                        mike,
                                        "LIST ALL PERMISSIONS OF mike")
 
-    def list_permissions_validation_test(self):
+    def test_list_permissions_validation(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -1815,7 +1825,7 @@ class TestAuthRoles(Tester):
                             "LIST ALL PERMISSIONS OF john",
                             "You are not authorized to view john's permissions")
 
-    def role_caching_authenticated_user_test(self):
+    def test_role_caching_authenticated_user(self):
         """
         This test is to show that the role caching in AuthenticatedUser
         works correctly and revokes the roles from a logged in user
@@ -1853,7 +1863,7 @@ class TestAuthRoles(Tester):
             except Unauthorized as e:
                 unauthorized = e
 
-        self.assertIsNotNone(unauthorized)
+        assert unauthorized is not None
 
     def drop_non_existent_role_should_not_update_cache(self):
         """
@@ -1883,7 +1893,7 @@ class TestAuthRoles(Tester):
         mike = self.get_session(user='mike', password='12345')
         mike.execute("SELECT * FROM ks.cf")
 
-    def prevent_circular_grants_test(self):
+    def test_prevent_circular_grants(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -1906,7 +1916,7 @@ class TestAuthRoles(Tester):
                        "mike is a member of role2",
                        InvalidRequest)
 
-    def create_user_as_alias_for_create_role_test(self):
+    def test_create_user_as_alias_for_create_role(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -1920,7 +1930,7 @@ class TestAuthRoles(Tester):
         cassandra.execute("CREATE USER super_user WITH PASSWORD '12345' SUPERUSER")
         assert_one(cassandra, "LIST ROLES OF super_user", ["super_user", True, True, {}])
 
-    def role_name_test(self):
+    def test_role_name(self):
         """
         Simple test to verify the behaviour of quoting when creating roles & users
         * Launch a one node cluster
@@ -1959,7 +1969,7 @@ class TestAuthRoles(Tester):
         self.get_session(user='USER2', password='12345')
         self.assert_unauthenticated('User2', '12345')
 
-    def role_requires_login_privilege_to_authenticate_test(self):
+    def test_role_requires_login_privilege_to_authenticate(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -1982,7 +1992,7 @@ class TestAuthRoles(Tester):
         assert_one(cassandra, "LIST ROLES OF mike", ["mike", False, True, {}])
         self.get_session(user='mike', password='12345')
 
-    def roles_do_not_inherit_login_privilege_test(self):
+    def test_roles_do_not_inherit_login_privilege(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -2002,7 +2012,7 @@ class TestAuthRoles(Tester):
 
         self.assert_login_not_allowed("mike", "12345")
 
-    def role_requires_password_to_login_test(self):
+    def test_role_requires_password_to_login(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -2021,7 +2031,7 @@ class TestAuthRoles(Tester):
         cassandra.execute("ALTER ROLE mike WITH PASSWORD = '12345'")
         self.get_session(user='mike', password='12345')
 
-    def superuser_status_is_inherited_test(self):
+    def test_superuser_status_is_inherited(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -2048,7 +2058,7 @@ class TestAuthRoles(Tester):
                                         ["db_admin", True, False, {}],
                                         list(mike_role)])
 
-    def list_users_considers_inherited_superuser_status_test(self):
+    def test_list_users_considers_inherited_superuser_status(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -2065,7 +2075,7 @@ class TestAuthRoles(Tester):
                                              ["mike", True]])
 
     # UDF permissions tests # TODO move to separate fixture & refactor this + auth_test.py
-    def grant_revoke_udf_permissions_test(self):
+    def test_grant_revoke_udf_permissions(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -2111,7 +2121,7 @@ class TestAuthRoles(Tester):
         cassandra.execute("REVOKE EXECUTE PERMISSION ON ALL FUNCTIONS FROM mike")
         self.assert_no_permissions(cassandra, "LIST ALL PERMISSIONS OF mike")
 
-    def grant_revoke_are_idempotent_test(self):
+    def test_grant_revoke_are_idempotent(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -2135,7 +2145,7 @@ class TestAuthRoles(Tester):
         cassandra.execute("REVOKE EXECUTE ON FUNCTION ks.plus_one(int) FROM mike")
         self.assert_no_permissions(cassandra, "LIST ALL PERMISSIONS OF mike")
 
-    def function_resource_hierarchy_permissions_test(self):
+    def test_function_resource_hierarchy_permissions(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -2197,7 +2207,7 @@ class TestAuthRoles(Tester):
         mike.execute(select_one)
         mike.execute(select_two)
 
-    def udf_permissions_validation_test(self):
+    def test_udf_permissions_validation(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -2262,7 +2272,7 @@ class TestAuthRoles(Tester):
         cassandra.execute("GRANT CREATE ON ALL FUNCTIONS IN KEYSPACE ks TO mike")
         mike.execute(cql)
 
-    def drop_role_cleans_up_udf_permissions_test(self):
+    def test_drop_role_cleans_up_udf_permissions(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -2291,7 +2301,7 @@ class TestAuthRoles(Tester):
         cassandra.execute("CREATE ROLE mike WITH PASSWORD = '12345' AND LOGIN = true")
         self.assert_no_permissions(cassandra, "LIST ALL PERMISSIONS OF mike")
 
-    def drop_function_and_keyspace_cleans_up_udf_permissions_test(self):
+    def test_drop_function_and_keyspace_cleans_up_udf_permissions(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -2323,7 +2333,7 @@ class TestAuthRoles(Tester):
         cassandra.execute("DROP KEYSPACE ks")
         self.assert_no_permissions(cassandra, "LIST ALL PERMISSIONS OF mike")
 
-    def udf_with_overloads_permissions_test(self):
+    def test_udf_with_overloads_permissions(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -2367,7 +2377,7 @@ class TestAuthRoles(Tester):
         cassandra.execute("DROP FUNCTION ks.plus_one(int)")
         self.assert_no_permissions(cassandra, "LIST ALL PERMISSIONS OF mike")
 
-    def drop_keyspace_cleans_up_function_level_permissions_test(self):
+    def test_drop_keyspace_cleans_up_function_level_permissions(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -2393,31 +2403,31 @@ class TestAuthRoles(Tester):
         cassandra.execute("DROP KEYSPACE ks")
         self.assert_no_permissions(cassandra, "LIST ALL PERMISSIONS OF mike")
 
-    def udf_permissions_in_selection_test(self):
+    def test_udf_permissions_in_selection(self):
         """
         Verify EXECUTE permission works in a SELECT when UDF is one of the columns requested
         """
         self.verify_udf_permissions("SELECT k, v, ks.plus_one(v) FROM ks.t1 WHERE k = 1")
 
-    def udf_permissions_in_select_where_clause_test(self):
+    def test_udf_permissions_in_select_where_clause(self):
         """
         Verify EXECUTE permission works in a SELECT when UDF is in the WHERE clause
         """
         self.verify_udf_permissions("SELECT k, v FROM ks.t1 WHERE k = ks.plus_one(0)")
 
-    def udf_permissions_in_insert_test(self):
+    def test_udf_permissions_in_insert(self):
         """
         Verify EXECUTE permission works in an INSERT when UDF is in the VALUES
         """
         self.verify_udf_permissions("INSERT INTO ks.t1 (k, v) VALUES (1, ks.plus_one(1))")
 
-    def udf_permissions_in_update_test(self):
+    def test_udf_permissions_in_update(self):
         """
         Verify EXECUTE permission works in an UPDATE when UDF is in the SET and WHERE clauses
         """
         self.verify_udf_permissions("UPDATE ks.t1 SET v = ks.plus_one(2) WHERE k = ks.plus_one(0)")
 
-    def udf_permissions_in_delete_test(self):
+    def test_udf_permissions_in_delete(self):
         """
         Verify EXECUTE permission works in a DELETE when UDF is in the WHERE clause
         """
@@ -2447,7 +2457,7 @@ class TestAuthRoles(Tester):
         cassandra.execute("GRANT EXECUTE ON FUNCTION ks.plus_one(int) TO mike")
         return mike.execute(cql)
 
-    def inheritence_of_udf_permissions_test(self):
+    def test_inheritence_of_udf_permissions(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -2471,7 +2481,7 @@ class TestAuthRoles(Tester):
         cassandra.execute("GRANT function_user TO mike")
         assert_one(mike, select, [1, 1, 2])
 
-    def builtin_functions_require_no_special_permissions_test(self):
+    def test_builtin_functions_require_no_special_permissions(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -2486,7 +2496,7 @@ class TestAuthRoles(Tester):
         cassandra.execute("GRANT ALL PERMISSIONS ON ks.t1 TO mike")
         assert_one(mike, "SELECT * from ks.t1 WHERE k=blobasint(intasblob(1))", [1, 1])
 
-    def disallow_grant_revoke_on_builtin_functions_test(self):
+    def test_disallow_grant_revoke_on_builtin_functions(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -2510,7 +2520,7 @@ class TestAuthRoles(Tester):
                        "Altering permissions on builtin functions is not supported",
                        InvalidRequest)
 
-    def disallow_grant_execute_on_non_function_resources_test(self):
+    def test_disallow_grant_execute_on_non_function_resources(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -2540,7 +2550,7 @@ class TestAuthRoles(Tester):
                        "Resource type RoleResource does not support any of the requested permissions",
                        SyntaxException)
 
-    def aggregate_function_permissions_test(self):
+    def test_aggregate_function_permissions(self):
         """
         * Launch a one node cluster
         * Connect as the default superuser
@@ -2609,9 +2619,9 @@ class TestAuthRoles(Tester):
         cassandra.execute("DROP AGGREGATE ks.simple_aggregate(int)")
         all_perms = list(cassandra.execute("LIST ALL PERMISSIONS OF mike"))
         for p in agg_perms:
-            self.assertFalse(p in all_perms, msg="Perm {p} found, but should be removed".format(p=p))
+            assert p not in all_perms, "Perm {p} found, but should be removed".format(p=p)
 
-    def ignore_invalid_roles_test(self):
+    def test_ignore_invalid_roles(self):
         """
         The system_auth.roles table includes a set of roles of which each role
         is a member. If that list were to get out of sync, so that it indicated
@@ -2619,7 +2629,6 @@ class TestAuthRoles(Tester):
         table, then the result of LIST ROLES OF roleA should not include roleB
         @jira_ticket CASSANDRA-9551
         """
-
         self.prepare()
         cassandra = self.get_session(user='cassandra', password='cassandra')
         cassandra.execute("CREATE ROLE mike WITH LOGIN = true")
@@ -2632,10 +2641,10 @@ class TestAuthRoles(Tester):
         session.execute("CREATE TABLE ks.t1 (k int PRIMARY KEY, v int)")
 
     def assert_unauthenticated(self, user, password):
-        with self.assertRaises(NoHostAvailable) as response:
+        with pytest.raises(NoHostAvailable) as response:
             node = self.cluster.nodelist()[0]
             self.cql_connection(node, user=user, password=password)
-        host, error = response.exception.errors.popitem()
+        host, error = response.value.errors.popitem()
 
         message = "Provided username {user} and/or password are incorrect".format(user=user)\
             if node.cluster.version() >= LooseVersion('3.10') \
@@ -2644,19 +2653,19 @@ class TestAuthRoles(Tester):
                   '[Bad credentials] message="{message}"'.format(host=host, message=message)
 
         assert isinstance(error, AuthenticationFailed), "Expected AuthenticationFailed, got {error}".format(error=error)
-        self.assertIn(pattern, error.message)
+        assert pattern in repr(error)
 
     def assert_login_not_allowed(self, user, password):
-        with self.assertRaises(NoHostAvailable) as response:
+        with pytest.raises(NoHostAvailable) as response:
             node = self.cluster.nodelist()[0]
             self.cql_connection(node, user=user, password=password)
-        host, error = response.exception.errors.popitem()
+        host, error = response.value.errors.popitem()
 
         pattern = 'Failed to authenticate to {host}: Error from server: code=0100 ' \
                   '[Bad credentials] message="{user} is not permitted to log in"'.format(host=host, user=user)
 
         assert isinstance(error, AuthenticationFailed), "Expected AuthenticationFailed, got {error}".format(error=error)
-        self.assertIn(pattern, error.message)
+        assert pattern in repr(error)
 
     def get_session(self, node_idx=0, user=None, password=None):
         """
@@ -2684,10 +2693,10 @@ class TestAuthRoles(Tester):
     def assert_permissions_listed(self, expected, session, query):
         rows = session.execute(query)
         perms = [(str(r.role), str(r.resource), str(r.permission)) for r in rows]
-        self.assertEqual(sorted(expected), sorted(perms))
+        assert sorted(expected) == sorted(perms)
 
     def assert_no_permissions(self, session, query):
-        self.assertItemsEqual(list(session.execute(query)), [])
+        assert list(session.execute(query)) == []
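Note that assertItemsEqual compared collections order-insensitively; the straight equality above is fine for the empty-list case, but non-empty comparisons migrated the same way need a sort. A minimal sketch of an order-insensitive replacement, assuming the rows are sortable (hypothetical helper, not part of this commit):

    def assert_items_equal(actual, expected):
        # Order-insensitive stand-in for unittest's assertItemsEqual
        # (assertCountEqual on Python 3): compare sorted copies.
        assert sorted(actual) == sorted(expected)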
 
 
 def role_creator_permissions(creator, role):




[31/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/cql_test.py
----------------------------------------------------------------------
diff --git a/cql_test.py b/cql_test.py
new file mode 100644
index 0000000..f49e627
--- /dev/null
+++ b/cql_test.py
@@ -0,0 +1,1503 @@
+import itertools
+import struct
+import time
+import pytest
+import logging
+
+from flaky import flaky
+
+from cassandra import ConsistencyLevel, InvalidRequest
+from cassandra.metadata import NetworkTopologyStrategy, SimpleStrategy
+from cassandra.policies import FallthroughRetryPolicy
+from cassandra.query import SimpleStatement
+
+from dtest import Tester, create_ks
+from distutils.version import LooseVersion
+from thrift_bindings.thrift010.ttypes import \
+    ConsistencyLevel as ThriftConsistencyLevel
+from thrift_bindings.thrift010.ttypes import (CfDef, Column, ColumnOrSuperColumn,
+                                        Mutation)
+from thrift_test import get_thrift_client
+from tools.assertions import (assert_all, assert_invalid, assert_length_equal,
+                              assert_none, assert_one, assert_unavailable)
+
+from tools.data import rows_to_list
+from tools.metadata_wrapper import (UpdatingClusterMetadataWrapper,
+                                    UpdatingKeyspaceMetadataWrapper,
+                                    UpdatingTableMetadataWrapper)
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
+
+class CQLTester(Tester):
+
+    def prepare(self, ordered=False, create_keyspace=True, use_cache=False,
+                nodes=1, rf=1, protocol_version=None, user=None, password=None,
+                start_rpc=False, **kwargs):
+        cluster = self.cluster
+
+        if ordered:
+            cluster.set_partitioner("org.apache.cassandra.dht.ByteOrderedPartitioner")
+
+        if use_cache:
+            cluster.set_configuration_options(values={'row_cache_size_in_mb': 100})
+
+        if start_rpc:
+            cluster.set_configuration_options(values={'start_rpc': True})
+
+        if user:
+            config = {'authenticator': 'org.apache.cassandra.auth.PasswordAuthenticator',
+                      'authorizer': 'org.apache.cassandra.auth.CassandraAuthorizer',
+                      'permissions_validity_in_ms': 0}
+            cluster.set_configuration_options(values=config)
+
+        if not cluster.nodelist():
+            cluster.populate(nodes).start(wait_for_binary_proto=True)
+        node1 = cluster.nodelist()[0]
+
+        session = self.patient_cql_connection(node1, protocol_version=protocol_version, user=user, password=password)
+        if create_keyspace:
+            create_ks(session, 'ks', rf)
+        return session
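The prepare() helper above is the entry point for every test in this file; for orientation, a multi-node, authenticated run might be set up like this (illustrative values only):

    # Inside a test method of a CQLTester subclass:
    session = self.prepare(nodes=3, rf=3, user='cassandra', password='cassandra')
    session.execute("CREATE TABLE t (k int PRIMARY KEY, v int)")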
+
+
+class TestCQL(CQLTester):
+    """
+    Each CQL statement is exercised at least once in order to
+    ensure we execute the code path in StorageProxy.
+    # TODO This probably isn't true anymore?
+    Note that in depth CQL validation is done in Java unit tests,
+    see CASSANDRA-9160.
+
+    # TODO I'm not convinced we need these. Seems like all the functionality
+    #      is covered in greater detail in other test classes.
+    """
+
+    def test_keyspace(self):
+        """
+        Smoke test that basic keyspace operations work:
+
+        - create a keyspace
+        - assert keyspace exists and is configured as expected with the driver metadata API
+        - ALTER it
+        - assert keyspace was correctly altered with the driver metadata API
+        - DROP it
+        - assert keyspace is no longer in keyspace metadata
+        """
+        session = self.prepare(create_keyspace=False)
+        meta = UpdatingClusterMetadataWrapper(session.cluster)
+
+        assert 'ks' not in meta.keyspaces
+        session.execute("CREATE KEYSPACE ks WITH replication = "
+                        "{ 'class':'SimpleStrategy', 'replication_factor':1} "
+                        "AND DURABLE_WRITES = true")
+        assert 'ks' in meta.keyspaces
+
+        ks_meta = UpdatingKeyspaceMetadataWrapper(session.cluster, ks_name='ks')
+        assert ks_meta.durable_writes
+        assert isinstance(ks_meta.replication_strategy, SimpleStrategy)
+
+        session.execute("ALTER KEYSPACE ks WITH replication = "
+                        "{ 'class' : 'NetworkTopologyStrategy', 'datacenter1' : 1 } "
+                        "AND DURABLE_WRITES = false")
+        assert not ks_meta.durable_writes
+        assert isinstance(ks_meta.replication_strategy, NetworkTopologyStrategy)
+
+        session.execute("DROP KEYSPACE ks")
+        assert 'ks' not in meta.keyspaces
+
+    def test_table(self):
+        """
+        Smoke test that basic table operations work:
+
+        - create a table
+        - ALTER the table adding a column
+        - insert 10 values
+        - SELECT * and assert the values are there
+        - TRUNCATE the table
+        - SELECT * and assert there are no values
+        - DROP the table
+        - SELECT * and assert the statement raises an InvalidRequest
+        # TODO run SELECTs to make sure each statement works
+        """
+        session = self.prepare()
+
+        ks_meta = UpdatingKeyspaceMetadataWrapper(session.cluster, ks_name='ks')
+
+        session.execute("CREATE TABLE test1 (k int PRIMARY KEY, v1 int)")
+        assert 'test1' in ks_meta.tables
+
+        t1_meta = UpdatingTableMetadataWrapper(session.cluster, ks_name='ks', table_name='test1')
+
+        session.execute("ALTER TABLE test1 ADD v2 int")
+        assert 'v2' in t1_meta.columns
+
+        for i in range(0, 10):
+            session.execute("INSERT INTO test1 (k, v1, v2) VALUES ({i}, {i}, {i})".format(i=i))
+
+        assert_all(session, "SELECT * FROM test1", [[i, i, i] for i in range(0, 10)], ignore_order=True)
+
+        session.execute("TRUNCATE test1")
+
+        assert_none(session, "SELECT * FROM test1")
+
+        session.execute("DROP TABLE test1")
+        assert 'test1' not in ks_meta.tables
+
+    @since("2.0", max_version="3.X")
+    def test_table_compact_storage(self):
+        """
+        Smoke test that basic table operations work:
+
+        - create a table with COMPACT STORAGE
+        - insert 10 values
+        - SELECT * and assert the values are there
+        - TRUNCATE the table
+        - SELECT * and assert there are no values
+        - DROP the table
+        - SELECT * and assert the statement raises an InvalidRequest
+        # TODO run SELECTs to make sure each statement works
+        """
+        session = self.prepare()
+
+        ks_meta = UpdatingKeyspaceMetadataWrapper(session.cluster, ks_name='ks')
+
+        session.execute("CREATE TABLE test2 (k int, c1 int, v1 int, PRIMARY KEY (k, c1)) WITH COMPACT STORAGE")
+        assert 'test2' in ks_meta.tables
+
+        for i in range(0, 10):
+            session.execute("INSERT INTO test2 (k, c1, v1) VALUES ({i}, {i}, {i})".format(i=i))
+
+        assert_all(session, "SELECT * FROM test2", [[i, i, i] for i in range(0, 10)], ignore_order=True)
+
+        session.execute("TRUNCATE test2")
+
+        assert_none(session, "SELECT * FROM test2")
+
+        session.execute("DROP TABLE test2")
+        assert 'test2' not in ks_meta.tables
+
+    def test_index(self):
+        """
+        Smoke test CQL statements related to indexes:
+
+        - CREATE a table
+        - CREATE an index on that table
+        - INSERT 10 values into the table
+        - SELECT from the table over the indexed value and assert the expected values come back
+        - drop the index
+        - assert SELECTing over the indexed value raises an InvalidRequest
+        # TODO run SELECTs to make sure each statement works
+        """
+        session = self.prepare()
+
+        session.execute("CREATE TABLE test3 (k int PRIMARY KEY, v1 int, v2 int)")
+        table_meta = UpdatingTableMetadataWrapper(session.cluster, ks_name='ks', table_name='test3')
+        session.execute("CREATE INDEX testidx ON test3 (v1)")
+        assert 'testidx' in table_meta.indexes
+
+        for i in range(0, 10):
+            session.execute("INSERT INTO test3 (k, v1, v2) VALUES ({i}, {i}, {i})".format(i=i))
+
+        assert_one(session, "SELECT * FROM test3 WHERE v1 = 0", [0, 0, 0])
+
+        session.execute("DROP INDEX testidx")
+        assert 'testidx' not in table_meta.indexes
+
+    def test_type(self):
+        """
+        Smoke test basic TYPE operations:
+
+        - CREATE a type
+        - CREATE a table using that type
+        - ALTER the type and CREATE another table
+        - DROP the tables and type
+        - CREATE another table using the DROPped type and assert it fails with an InvalidRequest
+        # TODO run SELECTs to make sure each statement works
+        # TODO is this even necessary given the existence of the auth_tests?
+        """
+        session = self.prepare()
+        # even though we only ever use the user_types attribute of this object,
+        # we have to access it each time, because attribute access is how the
+        # value is updated
+        ks_meta = UpdatingKeyspaceMetadataWrapper(session.cluster, ks_name='ks')
+
+        session.execute("CREATE TYPE address_t (street text, city text, zip_code int)")
+        assert 'address_t' in ks_meta.user_types
+
+        session.execute("CREATE TABLE test4 (id int PRIMARY KEY, address frozen<address_t>)")
+
+        session.execute("ALTER TYPE address_t ADD phones set<text>")
+        assert 'phones' in ks_meta.user_types['address_t'].field_names
+
+        # drop the table so we can safely drop the type it uses
+        session.execute("DROP TABLE test4")
+
+        session.execute("DROP TYPE address_t")
+        assert 'address_t' not in ks_meta.user_types
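As the comment in test_type notes, the Updating*MetadataWrapper helpers re-read the driver metadata on every attribute access. A hypothetical sketch of that idea (not the tools.metadata_wrapper source), assuming only the driver's cluster.metadata API:

    class RefreshingKeyspaceMetadata(object):
        # Each attribute access re-reads the keyspace metadata, which the
        # driver keeps current as schema changes propagate.
        def __init__(self, cluster, ks_name):
            self._cluster = cluster
            self._ks_name = ks_name

        def __getattr__(self, name):
            return getattr(self._cluster.metadata.keyspaces[self._ks_name], name)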
+
+    def test_user(self):
+        """
+        Smoke test for basic USER queries:
+
+        - get a session as the default superuser
+        - CREATE a user
+        - ALTER that user by giving it a different password
+        - DROP that user
+        # TODO list users after each to make sure each statement works
+        """
+        session = self.prepare(user='cassandra', password='cassandra')
+        node1 = self.cluster.nodelist()[0]
+
+        def get_usernames():
+            return [user.name for user in session.execute('LIST USERS')]
+
+        assert 'user1' not in get_usernames()
+
+        session.execute("CREATE USER user1 WITH PASSWORD 'secret'")
+        # use patient to retry until it works, because it takes some time for
+        # the CREATE to take effect
+        self.patient_cql_connection(node1, user='user1', password='secret')
+
+        session.execute("ALTER USER user1 WITH PASSWORD 'secret^2'")
+        # use patient for same reason as above
+        self.patient_cql_connection(node1, user='user1', password='secret^2')
+
+        session.execute("DROP USER user1")
+        assert 'user1' not in get_usernames()
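The patient_cql_connection calls above retry until the node accepts the credentials. Conceptually, the retry amounts to something like the following sketch (an illustration of the idea, not the dtest implementation):

    import time

    def retry_until_success(fn, timeout=30, interval=1):
        # Keep calling fn() until it stops raising or the timeout expires.
        deadline = time.time() + timeout
        while True:
            try:
                return fn()
            except Exception:
                if time.time() >= deadline:
                    raise
                time.sleep(interval)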
+
+    def test_statements(self):
+        """
+        Smoke test SELECT and UPDATE statements:
+
+        - create a table
+        - insert 20 rows into the table
+        - run SELECT COUNT queries and assert they return the correct values
+            - bare and with IN and equality conditions
+        - run SELECT * queries with = conditions
+        - run UPDATE queries
+        - SELECT * and assert the UPDATEd values are there
+        - DELETE with a = condition
+        - SELECT the deleted values and make sure nothing is returned
+        # TODO run SELECTs to make sure each statement works
+        """
+        session = self.prepare()
+
+        session.execute("CREATE TABLE test7 (kind text, time int, v1 int, v2 int, PRIMARY KEY(kind, time) )")
+
+        for i in range(0, 10):
+            session.execute("INSERT INTO test7 (kind, time, v1, v2) VALUES ('ev1', {i}, {i}, {i})".format(i=i))
+            session.execute("INSERT INTO test7 (kind, time, v1, v2) VALUES ('ev2', {i}, {i}, {i})".format(i=i))
+
+        assert_one(session, "SELECT COUNT(*) FROM test7 WHERE kind = 'ev1'", [10])
+
+        assert_one(session, "SELECT COUNT(*) FROM test7 WHERE kind IN ('ev1', 'ev2')", [20])
+
+        assert_one(session, "SELECT COUNT(*) FROM test7 WHERE kind IN ('ev1', 'ev2') AND time=0", [2])
+
+        assert_all(session, "SELECT * FROM test7 WHERE kind = 'ev1'", [['ev1', i, i, i] for i in range(0, 10)])
+
+        assert_all(session, "SELECT * FROM test7 WHERE kind = 'ev2'", [['ev2', i, i, i] for i in range(0, 10)])
+
+        for i in range(0, 10):
+            session.execute("UPDATE test7 SET v1 = 0, v2 = 0 where kind = 'ev1' AND time={i}".format(i=i))
+
+        assert_all(session, "SELECT * FROM test7 WHERE kind = 'ev1'", [['ev1', i, 0, 0] for i in range(0, 10)])
+
+        session.execute("DELETE FROM test7 WHERE kind = 'ev1'")
+        assert_none(session, "SELECT * FROM test7 WHERE kind = 'ev1'")
+
+        assert_one(session, "SELECT COUNT(*) FROM test7 WHERE kind = 'ev1'", [0])
+
+    @since('3.10')
+    def test_partition_key_allow_filtering(self):
+        """
+        Filtering with unrestricted parts of partition keys
+
+        @jira_ticket CASSANDRA-11031
+        """
+        session = self.prepare()
+
+        session.execute("""
+            CREATE TABLE IF NOT EXISTS test_filter (
+                k1 int,
+                k2 int,
+                ck1 int,
+                v int,
+                PRIMARY KEY ((k1, k2), ck1)
+            )
+        """)
+
+        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 0, 0, 0)")
+        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 0, 1, 0)")
+        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 0, 2, 0)")
+        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 0, 3, 0)")
+        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 1, 0, 0)")
+        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 1, 1, 0)")
+        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 1, 2, 0)")
+        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 1, 3, 0)")
+        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 0, 0, 0)")
+        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 0, 1, 0)")
+        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 0, 2, 0)")
+        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 0, 3, 0)")
+        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 1, 0, 0)")
+        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 1, 1, 0)")
+        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 1, 2, 0)")
+        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 1, 3, 0)")
+
+        # select test
+        assert_all(session,
+                   "SELECT * FROM test_filter WHERE k1 = 0 ALLOW FILTERING",
+                   [[0, 0, 0, 0],
+                    [0, 0, 1, 0],
+                    [0, 0, 2, 0],
+                    [0, 0, 3, 0],
+                    [0, 1, 0, 0],
+                    [0, 1, 1, 0],
+                    [0, 1, 2, 0],
+                    [0, 1, 3, 0]],
+                   ignore_order=True)
+
+        assert_all(session,
+                   "SELECT * FROM test_filter WHERE k1 <= 1 AND k2 >= 1 ALLOW FILTERING",
+                   [[0, 1, 0, 0],
+                    [0, 1, 1, 0],
+                    [0, 1, 2, 0],
+                    [0, 1, 3, 0],
+                    [1, 1, 0, 0],
+                    [1, 1, 1, 0],
+                    [1, 1, 2, 0],
+                    [1, 1, 3, 0]],
+                   ignore_order=True)
+
+        assert_none(session, "SELECT * FROM test_filter WHERE k1 = 2 ALLOW FILTERING")
+        assert_none(session, "SELECT * FROM test_filter WHERE k1 <=0 AND k2 > 1 ALLOW FILTERING")
+
+        assert_all(session,
+                   "SELECT * FROM test_filter WHERE k2 <= 0 ALLOW FILTERING",
+                   [[0, 0, 0, 0],
+                    [0, 0, 1, 0],
+                    [0, 0, 2, 0],
+                    [0, 0, 3, 0],
+                    [1, 0, 0, 0],
+                    [1, 0, 1, 0],
+                    [1, 0, 2, 0],
+                    [1, 0, 3, 0]],
+                   ignore_order=True)
+
+        assert_all(session,
+                   "SELECT * FROM test_filter WHERE k1 <= 0 AND k2 = 0 ALLOW FILTERING",
+                   [[0, 0, 0, 0],
+                    [0, 0, 1, 0],
+                    [0, 0, 2, 0],
+                    [0, 0, 3, 0]])
+
+        assert_all(session,
+                   "SELECT * FROM test_filter WHERE k2 = 1 ALLOW FILTERING",
+                   [[0, 1, 0, 0],
+                    [0, 1, 1, 0],
+                    [0, 1, 2, 0],
+                    [0, 1, 3, 0],
+                    [1, 1, 0, 0],
+                    [1, 1, 1, 0],
+                    [1, 1, 2, 0],
+                    [1, 1, 3, 0]],
+                   ignore_order=True)
+
+        assert_none(session, "SELECT * FROM test_filter WHERE k2 = 2 ALLOW FILTERING")
+
+        # filtering on both Partition Key and Clustering key
+        assert_all(session,
+                   "SELECT * FROM test_filter WHERE k1 = 0 AND ck1=0 ALLOW FILTERING",
+                   [[0, 0, 0, 0],
+                    [0, 1, 0, 0]],
+                   ignore_order=True)
+
+        assert_all(session,
+                   "SELECT * FROM test_filter WHERE k1 = 0 AND k2=1 AND ck1=0 ALLOW FILTERING",
+                   [[0, 1, 0, 0]])
+
+        # count(*) test
+        assert_all(session,
+                   "SELECT count(*) FROM test_filter WHERE k2 = 0 ALLOW FILTERING",
+                   [[8]])
+
+        assert_all(session,
+                   "SELECT count(*) FROM test_filter WHERE k2 = 1 ALLOW FILTERING",
+                   [[8]])
+
+        assert_all(session,
+                   "SELECT count(*) FROM test_filter WHERE k2 = 2 ALLOW FILTERING",
+                   [[0]])
+
+        # test invalid query
+        with pytest.raises(InvalidRequest):
+            session.execute("SELECT * FROM test_filter WHERE k1 = 0")
+
+        with pytest.raises(InvalidRequest):
+            session.execute("SELECT * FROM test_filter WHERE k1 = 0 AND k2 > 0")
+
+        with pytest.raises(InvalidRequest):
+            session.execute("SELECT * FROM test_filter WHERE k1 >= 0 AND k2 in (0,1,2)")
+
+        with pytest.raises(InvalidRequest):
+            session.execute("SELECT * FROM test_filter WHERE k2 > 0")
+
+    def test_batch(self):
+        """
+        Smoke test for BATCH statements:
+
+        - CREATE a table
+        - create a BATCH statement and execute it at QUORUM
+        # TODO run SELECTs to make sure each statement works
+        """
+        session = self.prepare()
+
+        session.execute("""
+            CREATE TABLE test8 (
+                userid text PRIMARY KEY,
+                name text,
+                password text
+            )
+        """)
+
+        query = SimpleStatement("""
+            BEGIN BATCH
+                INSERT INTO test8 (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user');
+                UPDATE test8 SET password = 'ps22dhds' WHERE userid = 'user3';
+                INSERT INTO test8 (userid, password) VALUES ('user4', 'ch@ngem3c');
+                DELETE name FROM test8 WHERE userid = 'user1';
+            APPLY BATCH;
+        """, consistency_level=ConsistencyLevel.QUORUM)
+        session.execute(query)
+
+
+class TestMiscellaneousCQL(CQLTester):
+    """
+    CQL tests that cannot be performed as Java unit tests, see CASSANDRA-9160.
+    If you're considering adding a test here, consider writing Java unit tests
+    for CQL validation instead. Add a new test here only if there is a reason
+    for it, e.g. the test is related to the client protocol or thrift, requires
+    examining the log files, or must run on multiple nodes.
+    """
+
+    @since('2.1', max_version='3.0')
+    def test_large_collection_errors(self):
+        """
+        Assert C* logs warnings when selecting too large a collection over
+        protocol v2:
+
+        - prepare the cluster and connect using protocol v2
+        - CREATE a table containing a map column
+        - insert over 65535 elements into the map
+        - select all the elements of the map
+        - assert that the correct error was logged
+        """
+
+        # We only warn with protocol 2
+        session = self.prepare(protocol_version=2)
+
+        cluster = self.cluster
+        node1 = cluster.nodelist()[0]
+        self.fixture_dtest_setup.ignore_log_patterns = ["Detected collection for table"]
+
+        session.execute("""
+            CREATE TABLE maps (
+                userid text PRIMARY KEY,
+                properties map<int, text>
+            );
+        """)
+
+        # Insert more than the max, which is 65535
+        for i in range(70000):
+            session.execute("UPDATE maps SET properties[{}] = 'x' WHERE userid = 'user'".format(i))
+
+        # Query for the data and throw exception
+        session.execute("SELECT properties FROM maps WHERE userid = 'user'")
+        node1.watch_log_for("Detected collection for table ks.maps with 70000 elements, more than the 65535 limit. "
+                            "Only the first 65535 elements will be returned to the client. Please see "
+                            "http://cassandra.apache.org/doc/cql3/CQL.html#collections for more details.")
+
+    @since('2.0', max_version='4')
+    def test_cql3_insert_thrift(self):
+        """
+        Check that we can insert from thrift into a CQL3 table:
+
+        - CREATE a table via CQL
+        - insert values via thrift
+        - SELECT the inserted values and assert they are there as expected
+
+        @jira_ticket CASSANDRA-4377
+        """
+        session = self.prepare(start_rpc=True)
+
+        session.execute("""
+            CREATE TABLE test (
+                k int,
+                c int,
+                v int,
+                PRIMARY KEY (k, c)
+            )
+        """)
+
+        node = self.cluster.nodelist()[0]
+        host, port = node.network_interfaces['thrift']
+        client = get_thrift_client(host, port)
+        client.transport.open()
+        client.set_keyspace('ks')
+        key = struct.pack('>i', 2)
+        column_name_component = struct.pack('>i', 4)
+        # component length + component + EOC + component length + component + EOC
+        column_name = b'\x00\x04' + column_name_component + b'\x00' + b'\x00\x01' + 'v'.encode("utf-8") + b'\x00'
+        value = struct.pack('>i', 8)
+        client.batch_mutate(
+            {key: {'test': [Mutation(ColumnOrSuperColumn(column=Column(name=column_name, value=value, timestamp=100)))]}},
+            ThriftConsistencyLevel.ONE)
+
+        assert_one(session, "SELECT * FROM test", [2, 4, 8])
+
+    @since('2.0', max_version='4')
+    def test_rename(self):
+        """
+        Check that a thrift-created table can be renamed via CQL:
+
+        - create a table via the thrift interface
+        - INSERT a row via CQL
+        - ALTER the name of the table via CQL
+        - SELECT from the table and assert the values inserted are there
+        """
+        session = self.prepare(start_rpc=True)
+
+        node = self.cluster.nodelist()[0]
+        host, port = node.network_interfaces['thrift']
+        client = get_thrift_client(host, port)
+        client.transport.open()
+
+        cfdef = CfDef()
+        cfdef.keyspace = 'ks'
+        cfdef.name = 'test'
+        cfdef.column_type = 'Standard'
+        cfdef.comparator_type = 'CompositeType(Int32Type, Int32Type, Int32Type)'
+        cfdef.key_validation_class = 'UTF8Type'
+        cfdef.default_validation_class = 'UTF8Type'
+
+        client.set_keyspace('ks')
+        client.system_add_column_family(cfdef)
+
+        session.execute("INSERT INTO ks.test (key, column1, column2, column3, value) VALUES ('foo', 4, 3, 2, 'bar')")
+        session.execute("ALTER TABLE test RENAME column1 TO foo1 AND column2 TO foo2 AND column3 TO foo3")
+        assert_one(session, "SELECT foo1, foo2, foo3 FROM test", [4, 3, 2])
+
+    def test_invalid_string_literals(self):
+        """
+        @jira_ticket CASSANDRA-8101
+
+        - assert INSERTing into a nonexistent table fails normally, with an InvalidRequest exception
+        - create a table with ascii and text columns
+        - assert that the same insert against a nonexistent column still fails with an InvalidRequest
+        - assert that inserting non-ASCII characters into an ascii column raises an InvalidRequest
+        """
+        session = self.prepare()
+        # this should fail as normal, not with a ProtocolException
+        assert_invalid(session, "insert into invalid_string_literals (k, a) VALUES (0, '\u038E\u0394\u03B4\u03E0')")
+
+        session = self.patient_cql_connection(self.cluster.nodelist()[0], keyspace='ks')
+        session.execute("create table invalid_string_literals (k int primary key, a ascii, b text)")
+
+        # this should still fail with an InvalidRequest
+        assert_invalid(session, "insert into invalid_string_literals (k, c) VALUES (0, '\u038E\u0394\u03B4\u03E0')")
+
+        # try to insert utf-8 characters into an ascii column and make sure it fails
+        with pytest.raises(InvalidRequest, match='Invalid ASCII character in string literal'):
+            session.execute("insert into invalid_string_literals (k, a) VALUES (0, '\xE0\x80\x80')")
+
+    def test_prepared_statement_invalidation(self):
+        """
+        @jira_ticket CASSANDRA-7910
+
+        - CREATE a table and INSERT a row
+        - prepare 2 prepared SELECT statements
+        - SELECT the row with a bound prepared statement and assert it returns the expected row
+        - ALTER the table, dropping a column
+        - assert prepared statement without that column in it still works
+        - assert prepared statement containing that column fails
+        - ALTER the table, adding a column
+        - assert prepared statement without that column in it still works
+        - assert prepared statement containing that column also still works
+        - ALTER the table, changing the type of a column
+        - assert that both prepared statements still work
+        """
+        session = self.prepare()
+
+        session.execute("CREATE TABLE test (k int PRIMARY KEY, a int, b int, c int)")
+        session.execute("INSERT INTO test (k, a, b, c) VALUES (0, 0, 0, 0)")
+
+        wildcard_prepared = session.prepare("SELECT * FROM test")
+        explicit_prepared = session.prepare("SELECT k, a, b, c FROM test")
+        result = session.execute(wildcard_prepared.bind(None))
+        assert list(result) == [(0, 0, 0, 0)]
+
+        session.execute("ALTER TABLE test DROP c")
+        result = session.execute(wildcard_prepared.bind(None))
+        # wildcard select can be automatically re-prepared by the driver
+        assert list(result) == [(0, 0, 0)]
+        # but re-preparing the statement with explicit columns should fail
+        # (see PYTHON-207 for why we expect InvalidRequestException instead of the normal exc)
+        assert_invalid(session, explicit_prepared.bind(None), expected=InvalidRequest)
+
+        session.execute("ALTER TABLE test ADD d int")
+        result = session.execute(wildcard_prepared.bind(None))
+        assert list(result) == [(0, 0, 0, None)]
+
+        if self.cluster.version() < LooseVersion('3.0'):
+            explicit_prepared = session.prepare("SELECT k, a, b, d FROM test")
+
+            # when the type is altered, both statements will need to be re-prepared
+            # by the driver, but the re-preparation should succeed
+            session.execute("ALTER TABLE test ALTER d TYPE blob")
+            result = session.execute(wildcard_prepared.bind(None))
+            assert list(result) == [(0, 0, 0, None)]
+
+            result = session.execute(explicit_prepared.bind(None))
+            assert list(result) == [(0, 0, 0, None)]
+
+    def test_range_slice(self):
+        """
+        Regression test for CASSANDRA-1337:
+
+        - CREATE a table
+        - INSERT 2 rows
+        - SELECT * from the table
+        - assert 2 rows were returned
+
+        @jira_ticket CASSANDRA-1337
+        # TODO I don't see how this is an interesting test or how it tests 1337.
+        """
+
+        cluster = self.cluster
+
+        cluster.populate(2).start()
+        node1 = cluster.nodelist()[0]
+        time.sleep(0.2)
+
+        session = self.patient_cql_connection(node1)
+        create_ks(session, 'ks', 1)
+
+        session.execute("""
+            CREATE TABLE test (
+                k text PRIMARY KEY,
+                v int
+            );
+        """)
+        time.sleep(1)
+
+        session.execute("INSERT INTO test (k, v) VALUES ('foo', 0)")
+        session.execute("INSERT INTO test (k, v) VALUES ('bar', 1)")
+
+        res = list(session.execute("SELECT * FROM test"))
+        assert len(res) == 2, res
+
+    @pytest.mark.skip(reason="Skipping until PYTHON-893 is fixed")
+    def test_many_columns(self):
+        """
+        Test for tables with thousands of columns.
+        For CASSANDRA-11621.
+        """
+        session = self.prepare()
+        width = 5000
+        cluster = self.cluster
+
+        session.execute("CREATE TABLE very_wide_table (pk int PRIMARY KEY, " +
+                        ",".join(["c_{} int".format(i) for i in range(width)]) +
+                        ")")
+
+        session.execute("INSERT INTO very_wide_table (pk, " +
+                        ",".join(["c_{}".format(i) for i in range(width)]) +
+                        ") VALUES (100," +
+                        ",".join([str(i) for i in range(width)]) +
+                        ")")
+
+        assert_all(session, "SELECT " +
+                   ",".join(["c_{}".format(i) for i in range(width)]) +
+                   " FROM very_wide_table", [[i for i in range(width)]])
+
+    @since("3.11", max_version="3.X")
+    def test_drop_compact_storage_flag(self):
+        """
+        Test for CASSANDRA-10857, verifying the schema change
+        distribution across the other nodes.
+
+        """
+
+        cluster = self.cluster
+
+        cluster.populate(3).start()
+        node1, node2, node3 = cluster.nodelist()
+
+        session1 = self.patient_cql_connection(node1)
+        session2 = self.patient_cql_connection(node2)
+        session3 = self.patient_cql_connection(node3)
+        create_ks(session1, 'ks', 3)
+        sessions = [session1, session2, session3]
+
+        for session in sessions:
+            session.set_keyspace('ks')
+
+        session1.execute("""
+            CREATE TABLE test_drop_compact_storage (k int PRIMARY KEY, s1 int) WITH COMPACT STORAGE;
+        """)
+
+        session1.execute("INSERT INTO test_drop_compact_storage (k, s1) VALUES (1,1)")
+        session1.execute("INSERT INTO test_drop_compact_storage (k, s1) VALUES (2,2)")
+        session1.execute("INSERT INTO test_drop_compact_storage (k, s1) VALUES (3,3)")
+
+        for session in sessions:
+            res = session.execute("SELECT * from test_drop_compact_storage")
+            assert rows_to_list(res) == [[1, 1], [2, 2], [3, 3]]
+
+        session1.execute("ALTER TABLE test_drop_compact_storage DROP COMPACT STORAGE")
+
+        for session in sessions:
+            assert_all(session, "SELECT * from test_drop_compact_storage",
+                       [[1, None, 1, None],
+                        [2, None, 2, None],
+                        [3, None, 3, None]])
+
+
+@since('3.2')
+class AbortedQueryTester(CQLTester):
+    """
+    @jira_ticket CASSANDRA-7392
+
+    Test that read-queries that take longer than read_request_timeout_in_ms
+    time out.
+
+    # TODO The important part of these is "set up a combination of
+    #      configuration options that will make all reads time out, then
+    #      try to read and assert it times out". This can probably be made much
+    #      simpler -- most of the logic can be factored out. In many cases it
+    #      probably isn't even necessary to define a custom table or to insert
+    #      more than one value.
+    """
+
+    def test_local_query(self):
+        """
+        Check that a query running on the local coordinator node times out:
+
+        - set the read request timeouts to 1 second
+        - start the cluster with read_iteration_delay set to 5 ms
+            - the delay will be applied to each row iterated and will cause
+              read queries to take longer than the read timeout
+        - CREATE and INSERT into a table
+        - SELECT * from the table using a retry policy that never retries, and assert it times out
+
+        @jira_ticket CASSANDRA-7392
+        """
+        cluster = self.cluster
+        cluster.set_configuration_options(values={'request_timeout_in_ms': 1000,
+                                                  'read_request_timeout_in_ms': 1000,
+                                                  'range_request_timeout_in_ms': 1000})
+
+        # cassandra.test.read_iteration_delay_ms causes the state tracking read iterators
+        # introduced by CASSANDRA-7392 to pause by the specified amount of milliseconds every
+        # CQL row iterated for non system queries, so that these queries take much longer to complete,
+        # see ReadCommand.withStateTracking()
+        cluster.populate(1).start(wait_for_binary_proto=True,
+                                  jvm_args=["-Dcassandra.monitoring_report_interval_ms=10",
+                                            "-Dcassandra.test.read_iteration_delay_ms=5"])
+        node = cluster.nodelist()[0]
+        session = self.patient_cql_connection(node)
+
+        create_ks(session, 'ks', 1)
+        session.execute("""
+            CREATE TABLE test1 (
+                id int PRIMARY KEY,
+                val text
+            );
+        """)
+
+        for i in range(500):
+            session.execute("INSERT INTO test1 (id, val) VALUES ({}, 'foo')".format(i))
+
+        # use debug logs because at info level no-spam logger has unpredictable results
+        mark = node.mark_log(filename='debug.log')
+        statement = SimpleStatement("SELECT * from test1",
+                                    consistency_level=ConsistencyLevel.ONE,
+                                    retry_policy=FallthroughRetryPolicy())
+        assert_unavailable(lambda c: logger.debug(c.execute(statement)), session)
+        node.watch_log_for("operations timed out", filename='debug.log', from_mark=mark, timeout=120)
+
+    def test_remote_query(self):
+        """
+        Check that a query running on a node other than the coordinator times out:
+
+        - populate the cluster with 2 nodes
+        - set the read request timeouts to 1 second
+        - start one node without having it join the ring
+        - start the other node with read_iteration_delay set to 5 ms
+            - the delay will be applied to each row iterated and will cause
+              read queries to take longer than the read timeout
+        - CREATE a table
+        - INSERT 5000 rows on a session on the node that is not a member of the ring
+        - run SELECT statements and assert they fail
+        # TODO refactor SELECT statements:
+        #        - run the statements in a loop to reduce duplication
+        #        - watch the log after each query
+        #        - assert we raise the right error
+        """
+        cluster = self.cluster
+        cluster.set_configuration_options(values={'request_timeout_in_ms': 1000,
+                                                  'read_request_timeout_in_ms': 1000,
+                                                  'range_request_timeout_in_ms': 1000})
+
+        cluster.populate(2)
+        node1, node2 = cluster.nodelist()
+
+        node1.start(wait_for_binary_proto=True, join_ring=False)  # ensure other node executes queries
+        node2.start(wait_for_binary_proto=True,
+                    jvm_args=["-Dcassandra.monitoring_report_interval_ms=10",
+                              "-Dcassandra.test.read_iteration_delay_ms=5"])  # see above for explanation
+
+        session = self.patient_exclusive_cql_connection(node1)
+
+        create_ks(session, 'ks', 1)
+        session.execute("""
+            CREATE TABLE test2 (
+                id int,
+                col int,
+                val text,
+                PRIMARY KEY(id, col)
+            );
+        """)
+
+        for i, j in itertools.product(list(range(10)), list(range(500))):
+            session.execute("INSERT INTO test2 (id, col, val) VALUES ({}, {}, 'foo')".format(i, j))
+
+        # use debug logs because at info level no-spam logger has unpredictable results
+        mark = node2.mark_log(filename='debug.log')
+
+        statement = SimpleStatement("SELECT * from test2",
+                                    consistency_level=ConsistencyLevel.ONE,
+                                    retry_policy=FallthroughRetryPolicy())
+        assert_unavailable(lambda c: logger.debug(c.execute(statement)), session)
+
+        statement = SimpleStatement("SELECT * from test2 where id = 1",
+                                    consistency_level=ConsistencyLevel.ONE,
+                                    retry_policy=FallthroughRetryPolicy())
+        assert_unavailable(lambda c: logger.debug(c.execute(statement)), session)
+
+        statement = SimpleStatement("SELECT * from test2 where id IN (1, 2, 3) AND col > 10",
+                                    consistency_level=ConsistencyLevel.ONE,
+                                    retry_policy=FallthroughRetryPolicy())
+        assert_unavailable(lambda c: logger.debug(c.execute(statement)), session)
+
+        statement = SimpleStatement("SELECT * from test2 where col > 5 ALLOW FILTERING",
+                                    consistency_level=ConsistencyLevel.ONE,
+                                    retry_policy=FallthroughRetryPolicy())
+        assert_unavailable(lambda c: logger.debug(c.execute(statement)), session)
+
+        node2.watch_log_for("operations timed out", filename='debug.log', from_mark=mark, timeout=60)
+
+    def test_index_query(self):
+        """
+        Check that a secondary index query times out:
+
+        - populate a 1-node cluster
+        - set the read request timeouts to 1 second
+        - start the node with read_iteration_delay set to 5 ms
+            - the delay will be applied to each row iterated and will cause
+              read queries to take longer than the read timeout
+        - CREATE a table
+        - CREATE an index on the table
+        - INSERT 500 values into the table
+        - SELECT over the table and assert it times out
+        """
+        cluster = self.cluster
+        cluster.set_configuration_options(values={'request_timeout_in_ms': 1000,
+                                                  'read_request_timeout_in_ms': 1000,
+                                                  'range_request_timeout_in_ms': 1000})
+
+        cluster.populate(1).start(wait_for_binary_proto=True,
+                                  jvm_args=["-Dcassandra.monitoring_report_interval_ms=10",
+                                            "-Dcassandra.test.read_iteration_delay_ms=5"])  # see above for explanation
+        node = cluster.nodelist()[0]
+        session = self.patient_cql_connection(node)
+
+        create_ks(session, 'ks', 1)
+        session.execute("""
+            CREATE TABLE test3 (
+                id int PRIMARY KEY,
+                col int,
+                val text
+            );
+        """)
+
+        session.execute("CREATE INDEX ON test3 (col)")
+
+        for i in range(500):
+            session.execute("INSERT INTO test3 (id, col, val) VALUES ({}, 50, 'foo')".format(i))
+
+        # use debug logs because at info level no-spam logger has unpredictable results
+        mark = node.mark_log(filename='debug.log')
+        statement = session.prepare("SELECT * from test3 WHERE col = ? ALLOW FILTERING")
+        statement.consistency_level = ConsistencyLevel.ONE
+        statement.retry_policy = FallthroughRetryPolicy()
+        assert_unavailable(lambda c: logger.debug(c.execute(statement, [50])), session)
+        node.watch_log_for("operations timed out", filename='debug.log', from_mark=mark, timeout=120)
+
+    def test_materialized_view(self):
+        """
+        Check that a materialized view query times out:
+
+        - populate a 2-node cluster
+        - set the read request timeouts to 1 second
+        - start one node without having it join the ring
+        - start the other node with read_iteration_delay set to 5 ms
+            - the delay will be applied to each row iterated and will cause
+              read queries to take longer than the read timeout
+        - CREATE a table
+        - INSERT 500 values into that table
+        - CREATE a materialized view over that table
+        - assert querying that table results in an unavailable exception
+        """
+        cluster = self.cluster
+        cluster.set_configuration_options(values={'request_timeout_in_ms': 1000,
+                                                  'read_request_timeout_in_ms': 1000,
+                                                  'range_request_timeout_in_ms': 1000})
+
+        cluster.populate(2)
+        node1, node2 = cluster.nodelist()
+
+        node1.start(wait_for_binary_proto=True, join_ring=False)  # ensure other node executes queries
+        node2.start(wait_for_binary_proto=True,
+                    jvm_args=["-Dcassandra.monitoring_report_interval_ms=10",
+                              "-Dcassandra.test.read_iteration_delay_ms=5"])  # see above for explanation
+
+        session = self.patient_exclusive_cql_connection(node1)
+
+        create_ks(session, 'ks', 1)
+        session.execute("""
+            CREATE TABLE test4 (
+                id int PRIMARY KEY,
+                col int,
+                val text
+            );
+        """)
+
+        session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM test4 "
+                         "WHERE col IS NOT NULL AND id IS NOT NULL PRIMARY KEY (col, id)"))
+
+        for i in range(500):
+            session.execute("INSERT INTO test4 (id, col, val) VALUES ({}, 50, 'foo')".format(i))
+
+        # use debug logs because at info level no-spam logger has unpredictable results
+        mark = node2.mark_log(filename='debug.log')
+        statement = SimpleStatement("SELECT * FROM mv WHERE col = 50",
+                                    consistency_level=ConsistencyLevel.ONE,
+                                    retry_policy=FallthroughRetryPolicy())
+
+        assert_unavailable(lambda c: logger.debug(c.execute(statement)), session)
+        node2.watch_log_for("operations timed out", filename='debug.log', from_mark=mark, timeout=60)
+
+
+@since('3.10')
+class TestCQLSlowQuery(CQLTester):
+    """
+    Test slow query logging.
+
+    @jira_ticket CASSANDRA-12403
+    """
+    def test_local_query(self):
+        """
+        Check that a query running locally on the coordinator is reported as slow:
+
+        - start a one node cluster with slow_query_log_timeout_in_ms set to a small value
+          and the read request timeouts set to a large value (to ensure the query is not aborted) and
+          read_iteration_delay set to a value big enough for the query to exceed slow_query_log_timeout_in_ms
+          (this will cause read queries to take longer than the slow query timeout)
+        - CREATE and INSERT into a table
+        - SELECT * from the table using a retry policy that never retries, and check that the slow
+          query log messages are present in the debug logs (we cannot check the logs at info level because
+          the no-spam logger has unpredictable results)
+
+        @jira_ticket CASSANDRA-12403
+        """
+        cluster = self.cluster
+        cluster.set_configuration_options(values={'slow_query_log_timeout_in_ms': 10,
+                                                  'request_timeout_in_ms': 120000,
+                                                  'read_request_timeout_in_ms': 120000,
+                                                  'range_request_timeout_in_ms': 120000})
+
+        # cassandra.test.read_iteration_delay_ms causes the state tracking read iterators
+        # introduced by CASSANDRA-7392 to pause by the specified amount of milliseconds during each
+        # iteration of non system queries, so that these queries take much longer to complete,
+        # see ReadCommand.withStateTracking()
+        cluster.populate(1).start(wait_for_binary_proto=True,
+                                  jvm_args=["-Dcassandra.monitoring_report_interval_ms=10",
+                                            "-Dcassandra.test.read_iteration_delay_ms=1"])
+        node = cluster.nodelist()[0]
+        session = self.patient_cql_connection(node)
+
+        create_ks(session, 'ks', 1)
+        session.execute("""
+            CREATE TABLE test1 (
+                id int,
+                col int,
+                val text,
+                PRIMARY KEY(id, col)
+            );
+        """)
+
+        for i in range(100):
+            session.execute("INSERT INTO test1 (id, col, val) VALUES (1, {}, 'foo')".format(i))
+
+        # only check debug logs because at INFO level the no-spam logger has unpredictable results
+        mark = node.mark_log(filename='debug.log')
+
+        session.execute(SimpleStatement("SELECT * from test1",
+                                        consistency_level=ConsistencyLevel.ONE,
+                                        retry_policy=FallthroughRetryPolicy()))
+
+        node.watch_log_for(["operations were slow", "SELECT \* FROM ks.test1"],
+                           from_mark=mark, filename='debug.log', timeout=60)
+        mark = node.mark_log(filename='debug.log')
+
+        session.execute(SimpleStatement("SELECT * from test1 where id = 1",
+                                        consistency_level=ConsistencyLevel.ONE,
+                                        retry_policy=FallthroughRetryPolicy()))
+
+        node.watch_log_for(["operations were slow", "SELECT \* FROM ks.test1"],
+                           from_mark=mark, filename='debug.log', timeout=60)
+        mark = node.mark_log(filename='debug.log')
+
+        session.execute(SimpleStatement("SELECT * from test1 where id = 1",
+                                        consistency_level=ConsistencyLevel.ONE,
+                                        retry_policy=FallthroughRetryPolicy()))
+
+        node.watch_log_for(["operations were slow", "SELECT \* FROM ks.test1"],
+                           from_mark=mark, filename='debug.log', timeout=60)
+        mark = node.mark_log(filename='debug.log')
+
+        session.execute(SimpleStatement("SELECT * from test1 where token(id) < 0",
+                                        consistency_level=ConsistencyLevel.ONE,
+                                        retry_policy=FallthroughRetryPolicy()))
+
+        node.watch_log_for(["operations were slow", r"SELECT \* FROM ks.test1"],
+                           from_mark=mark, filename='debug.log', timeout=60)
+
+    def test_remote_query(self):
+        """
+        Check that a query running on a node other than the coordinator is reported as slow:
+
+        - populate the cluster with 2 nodes
+        - start one node without having it join the ring
+        - start the other node with slow_query_log_timeout_in_ms set to a small value,
+          the read request timeouts set to a large value (to ensure the query is not aborted), and
+          read_iteration_delay set to a value big enough for the query to exceed slow_query_log_timeout_in_ms
+          (this will cause read queries to take longer than the slow query timeout)
+        - CREATE a table
+        - INSERT 1000 rows on a session on the node that is not a member of the ring
+        - run SELECT statements and check that the slow query messages are present in the debug logs
+          (we cannot check the logs at INFO level because the no-spam logger has unpredictable results)
+
+        @jira_ticket CASSANDRA-12403
+        """
+        cluster = self.cluster
+        cluster.set_configuration_options(values={'slow_query_log_timeout_in_ms': 10,
+                                                  'request_timeout_in_ms': 120000,
+                                                  'read_request_timeout_in_ms': 120000,
+                                                  'range_request_timeout_in_ms': 120000})
+
+        cluster.populate(2)
+        node1, node2 = cluster.nodelist()
+
+        node1.start(wait_for_binary_proto=True, join_ring=False)  # ensure other node executes queries
+        node2.start(wait_for_binary_proto=True,
+                    jvm_args=["-Dcassandra.monitoring_report_interval_ms=10",
+                              "-Dcassandra.test.read_iteration_delay_ms=1"])  # see above for explanation
+
+        session = self.patient_exclusive_cql_connection(node1)
+
+        create_ks(session, 'ks', 1)
+        session.execute("""
+            CREATE TABLE test2 (
+                id int,
+                col int,
+                val text,
+                PRIMARY KEY(id, col)
+            );
+        """)
+
+        for i, j in itertools.product(list(range(100)), list(range(10))):
+            session.execute("INSERT INTO test2 (id, col, val) VALUES ({}, {}, 'foo')".format(i, j))
+
+        # only check debug logs because at INFO level the no-spam logger has unpredictable results
+        mark = node2.mark_log(filename='debug.log')
+        session.execute(SimpleStatement("SELECT * from test2",
+                                        consistency_level=ConsistencyLevel.ONE,
+                                        retry_policy=FallthroughRetryPolicy()))
+
+        node2.watch_log_for(["operations were slow", r"SELECT \* FROM ks.test2"],
+                            from_mark=mark, filename='debug.log', timeout=60)
+        mark = node2.mark_log(filename='debug.log')
+
+        session.execute(SimpleStatement("SELECT * from test2 where id = 1",
+                                        consistency_level=ConsistencyLevel.ONE,
+                                        retry_policy=FallthroughRetryPolicy()))
+
+        node2.watch_log_for(["operations were slow", r"SELECT \* FROM ks.test2"],
+                            from_mark=mark, filename='debug.log', timeout=60)
+        mark = node2.mark_log(filename='debug.log')
+
+        session.execute(SimpleStatement("SELECT * from test2 where id = 1",
+                                        consistency_level=ConsistencyLevel.ONE,
+                                        retry_policy=FallthroughRetryPolicy()))
+
+        node2.watch_log_for(["operations were slow", r"SELECT \* FROM ks.test2"],
+                            from_mark=mark, filename='debug.log', timeout=60)
+        mark = node2.mark_log(filename='debug.log')
+
+        session.execute(SimpleStatement("SELECT * from test2 where token(id) < 0",
+                                        consistency_level=ConsistencyLevel.ONE,
+                                        retry_policy=FallthroughRetryPolicy()))
+
+        node2.watch_log_for(["operations were slow", r"SELECT \* FROM ks.test2"],
+                            from_mark=mark, filename='debug.log', timeout=60)
+
+    def test_disable_slow_query_log(self):
+        """
+        Check that a query is NOT reported as slow if slow query logging is disabled.
+
+        - start a one-node cluster with slow_query_log_timeout_in_ms set to 0 milliseconds
+          (this will disable slow query logging), the read request timeouts set to a large value
+          (to ensure queries are not aborted) and read_iteration_delay set to 1 millisecond
+          (this will cause read queries to take longer than usual)
+        - CREATE and INSERT into a table
+        - SELECT * from the table using a retry policy that never retries, and check that NO slow
+          query log messages are present in the logs
+
+        @jira_ticket CASSANDRA-12403
+        """
+        cluster = self.cluster
+        cluster.set_configuration_options(values={'slow_query_log_timeout_in_ms': 0,
+                                                  'request_timeout_in_ms': 120000,
+                                                  'read_request_timeout_in_ms': 120000,
+                                                  'range_request_timeout_in_ms': 120000})
+
+        # cassandra.test.read_iteration_delay_ms causes the state tracking read iterators
+        # introduced by CASSANDRA-7392 to pause by the specified amount of milliseconds during each
+        # iteration of non system queries, so that these queries take much longer to complete,
+        # see ReadCommand.withStateTracking()
+        cluster.populate(1).start(wait_for_binary_proto=True,
+                                  jvm_args=["-Dcassandra.monitoring_report_interval_ms=10",
+                                            "-Dcassandra.test.read_iteration_delay_ms=1"])
+        node = cluster.nodelist()[0]
+        session = self.patient_cql_connection(node)
+
+        create_ks(session, 'ks', 1)
+        session.execute("""
+            CREATE TABLE test3 (
+                id int PRIMARY KEY,
+                val text
+            );
+        """)
+
+        for i in range(100):
+            session.execute("INSERT INTO test3 (id, val) VALUES ({}, 'foo')".format(i))
+
+        session.execute(SimpleStatement("SELECT * from test3",
+                                        consistency_level=ConsistencyLevel.ONE,
+                                        retry_policy=FallthroughRetryPolicy()))
+
+        time.sleep(1)  # do our best to ensure logs had a chance to appear
+
+        self._check_logs(node, r"SELECT \* FROM ks.test3", 'debug.log', 0)
+
+    def _check_logs(self, node, pattern, filename, num_expected):
+        ret = node.grep_log(pattern, filename=filename)
+        assert_length_equal(ret, num_expected)
+
+
+class TestLWTWithCQL(Tester):
+    """
+    Validate CQL LWT queries on static columns with null and non-existing rows
+    @jira_ticket CASSANDRA-9842
+    """
+
+    @pytest.fixture(scope='function', autouse=True)
+    def fixture_post_initialize_cluster(self, fixture_dtest_setup):
+        cluster = fixture_dtest_setup.cluster
+        cluster.populate(3)
+        cluster.start(wait_for_binary_proto=True)
+
+    def get_lwttester_session(self):
+        node1 = self.cluster.nodelist()[0]
+        session = self.patient_cql_connection(node1)
+        session.execute("""CREATE KEYSPACE IF NOT EXISTS ks WITH REPLICATION={'class':'SimpleStrategy',
+            'replication_factor':1}""")
+        session.execute("USE ks")
+        return session
+
+    def test_lwt_with_static_columns(self):
+        session = self.get_lwttester_session()
+
+        session.execute("""
+            CREATE TABLE lwt_with_static (a int, b int, s int static, d text, PRIMARY KEY (a, b))
+        """)
+
+        assert_one(session, "UPDATE lwt_with_static SET s = 1 WHERE a = 1 IF s = NULL", [True])
+
+        assert_one(session, "SELECT * FROM lwt_with_static", [1, None, 1, None])
+
+        assert_one(session, "UPDATE lwt_with_static SET s = 2 WHERE a = 2 IF EXISTS", [False])
+
+        assert_one(session, "SELECT * FROM lwt_with_static WHERE a = 1", [1, None, 1, None])
+
+        assert_one(session, "INSERT INTO lwt_with_static (a, s) VALUES (2, 2) IF NOT EXISTS", [True])
+
+        assert_one(session, "SELECT * FROM lwt_with_static WHERE a = 2", [2, None, 2, None])
+
+        assert_one(session, "BEGIN BATCH\n" +
+                   "INSERT INTO lwt_with_static (a, b, d) values (3, 3, 'a');\n" +
+                   "UPDATE lwt_with_static SET s = 3 WHERE a = 3 IF s = null;\n" +
+                   "APPLY BATCH;", [True])
+
+        assert_one(session, "SELECT * FROM lwt_with_static WHERE a = 3", [3, 3, 3, "a"])
+
+        # the LWT condition is evaluated against the pre-batch state, before the INSERT applies
+        assert_one(session, "BEGIN BATCH\n" +
+                   "INSERT INTO lwt_with_static (a, b, d) values (4, 4, 'a');\n" +
+                   "UPDATE lwt_with_static SET s = 4 WHERE a = 4 IF s = null;\n" +
+                   "APPLY BATCH;", [True])
+
+        assert_one(session, "SELECT * FROM lwt_with_static WHERE a = 4", [4, 4, 4, "a"])
+
+    def _validate_non_existing_or_null_values(self, table_name, session):
+        assert_one(session, "UPDATE {} SET s = 1 WHERE a = 1 IF s = NULL".format(table_name), [True])
+
+        assert_one(session, "SELECT a, s, d FROM {} WHERE a = 1".format(table_name), [1, 1, None])
+
+        assert_one(session, "UPDATE {} SET s = 2 WHERE a = 2 IF s IN (10,20,NULL)".format(table_name), [True])
+
+        assert_one(session, "SELECT a, s, d FROM {} WHERE a = 2".format(table_name), [2, 2, None])
+
+        assert_one(session, "UPDATE {} SET s = 4 WHERE a = 4 IF s != 4".format(table_name), [True])
+
+        assert_one(session, "SELECT a, s, d FROM {} WHERE a = 4".format(table_name), [4, 4, None])
+
+    def _is_new_lwt_format_version(self, version):
+        return version > LooseVersion('3.9') or (version > LooseVersion('3.0.9') and version < LooseVersion('3.1'))
+
+    @flaky
+    def test_conditional_updates_on_static_columns_with_null_values(self):
+        session = self.get_lwttester_session()
+
+        table_name = "conditional_updates_on_static_columns_with_null"
+        session.execute("""
+            CREATE TABLE {} (a int, b int, s int static, d text, PRIMARY KEY (a, b))
+        """.format(table_name))
+
+        for i in range(1, 6):
+            session.execute("INSERT INTO {} (a, b) VALUES ({}, {})".format(table_name, i, i))
+
+        self._validate_non_existing_or_null_values(table_name, session)
+
+        assert_one(session, "UPDATE {} SET s = 30 WHERE a = 3 IF s IN (10,20,30)".format(table_name),
+                   [False, None] if self._is_new_lwt_format_version(self.cluster.version()) else [False])
+
+        assert_one(session, "SELECT * FROM {} WHERE a = 3".format(table_name), [3, 3, None, None])
+
+        for operator in [">", "<", ">=", "<=", "="]:
+            assert_one(session, "UPDATE {} SET s = 50 WHERE a = 5 IF s {} 3".format(table_name, operator),
+                       [False, None] if self._is_new_lwt_format_version(self.cluster.version()) else [False])
+
+            assert_one(session, "SELECT * FROM {} WHERE a = 5".format(table_name), [5, 5, None, None])
+
+    def test_conditional_updates_on_static_columns_with_non_existing_values(self):
+        session = self.get_lwttester_session()
+
+        table_name = "conditional_updates_on_static_columns_with_ne"
+        session.execute("""
+            CREATE TABLE {} (a int, b int, s int static, d text, PRIMARY KEY (a, b))
+        """.format(table_name))
+
+        self._validate_non_existing_or_null_values(table_name, session)
+
+        assert_one(session, "UPDATE {} SET s = 30 WHERE a = 3 IF s IN (10,20,30)".format(table_name), [False])
+
+        assert_none(session, "SELECT * FROM {} WHERE a = 3".format(table_name))
+
+        for operator in [">", "<", ">=", "<=", "="]:
+            assert_one(session, "UPDATE {} SET s = 50 WHERE a = 5 IF s {} 3".format(table_name, operator), [False])
+
+            assert_none(session, "SELECT * FROM {} WHERE a = 5".format(table_name))
+
+    def _validate_non_existing_or_null_values_batch(self, table_name, session):
+        assert_one(session, """
+            BEGIN BATCH
+                INSERT INTO {table_name} (a, b, d) values (2, 2, 'a');
+                UPDATE {table_name} SET s = 2 WHERE a = 2 IF s = null;
+            APPLY BATCH""".format(table_name=table_name), [True])
+
+        assert_one(session, "SELECT * FROM {table_name} WHERE a = 2".format(table_name=table_name), [2, 2, 2, "a"])
+
+        assert_one(session, """
+            BEGIN BATCH
+                INSERT INTO {table_name} (a, b, s, d) values (4, 4, 4, 'a')
+                UPDATE {table_name} SET s = 5 WHERE a = 4 IF s = null;
+            APPLY BATCH""".format(table_name=table_name), [True])
+
+        assert_one(session, "SELECT * FROM {table_name} WHERE a = 4".format(table_name=table_name), [4, 4, 5, "a"])
+
+        assert_one(session, """
+            BEGIN BATCH
+                INSERT INTO {table_name} (a, b, s, d) values (5, 5, 5, 'a')
+                UPDATE {table_name} SET s = 6 WHERE a = 5 IF s IN (1,2,null)
+            APPLY BATCH""".format(table_name=table_name), [True])
+
+        assert_one(session, "SELECT * FROM {table_name} WHERE a = 5".format(table_name=table_name), [5, 5, 6, "a"])
+
+        assert_one(session, """
+            BEGIN BATCH
+                INSERT INTO {table_name} (a, b, s, d) values (7, 7, 7, 'a')
+                UPDATE {table_name} SET s = 8 WHERE a = 7 IF s != 7;
+            APPLY BATCH""".format(table_name=table_name), [True])
+
+        assert_one(session, "SELECT * FROM {table_name} WHERE a = 7".format(table_name=table_name), [7, 7, 8, "a"])
+
+    def test_conditional_updates_on_static_columns_with_null_values_batch(self):
+        session = self.get_lwttester_session()
+
+        table_name = "lwt_on_static_columns_with_null_batch"
+        session.execute("""
+            CREATE TABLE {table_name} (a int, b int, s int static, d text, PRIMARY KEY (a, b))
+        """.format(table_name=table_name))
+
+        for i in range(1, 7):
+            session.execute("INSERT INTO {table_name} (a, b) VALUES ({i}, {i})".format(table_name=table_name, i=i))
+
+        self._validate_non_existing_or_null_values_batch(table_name, session)
+
+        for operator in [">", "<", ">=", "<=", "="]:
+            assert_one(session, """
+                BEGIN BATCH
+                    INSERT INTO {table_name} (a, b, s, d) values (3, 3, 40, 'a')
+                    UPDATE {table_name} SET s = 30 WHERE a = 3 IF s {operator} 5;
+                APPLY BATCH""".format(table_name=table_name, operator=operator),
+                       [False, 3, 3, None] if self._is_new_lwt_format_version(self.cluster.version()) else [False])
+
+            assert_one(session, "SELECT * FROM {table_name} WHERE a = 3".format(table_name=table_name), [3, 3, None, None])
+
+        assert_one(session, """
+                BEGIN BATCH
+                    INSERT INTO {table_name} (a, b, s, d) values (6, 6, 70, 'a')
+                    UPDATE {table_name} SET s = 60 WHERE a = 6 IF s IN (1,2,3)
+                APPLY BATCH""".format(table_name=table_name),
+                   [False, 6, 6, None] if self._is_new_lwt_format_version(self.cluster.version()) else [False])
+
+        assert_one(session, "SELECT * FROM {table_name} WHERE a = 6".format(table_name=table_name), [6, 6, None, None])
+
+    def test_conditional_deletes_on_static_columns_with_null_values(self):
+        session = self.get_lwttester_session()
+
+        table_name = "conditional_deletes_on_static_with_null"
+        session.execute("""
+            CREATE TABLE {} (a int, b int, s1 int static, s2 int static, v int, PRIMARY KEY (a, b))
+        """.format(table_name))
+
+        for i in range(1, 6):
+            session.execute("INSERT INTO {} (a, b, s1, s2, v) VALUES ({}, {}, {}, null, {})".format(table_name, i, i, i, i))
+
+        assert_one(session, "DELETE s1 FROM {} WHERE a = 1 IF s2 = null".format(table_name), [True])
+
+        assert_one(session, "SELECT * FROM {} WHERE a = 1".format(table_name), [1, 1, None, None, 1])
+
+        assert_one(session, "DELETE s1 FROM {} WHERE a = 2 IF s2 IN (10,20,30)".format(table_name), [False, None])
+
+        assert_one(session, "SELECT * FROM {} WHERE a = 2".format(table_name), [2, 2, 2, None, 2])
+
+        assert_one(session, "DELETE s1 FROM {} WHERE a = 3 IF s2 IN (null,20,30)".format(table_name), [True])
+
+        assert_one(session, "SELECT * FROM {} WHERE a = 3".format(table_name), [3, 3, None, None, 3])
+
+        assert_one(session, "DELETE s1 FROM {} WHERE a = 4 IF s2 != 4".format(table_name), [True])
+
+        assert_one(session, "SELECT * FROM {} WHERE a = 4".format(table_name), [4, 4, None, None, 4])
+
+        for operator in [">", "<", ">=", "<=", "="]:
+            assert_one(session, "DELETE s1 FROM {} WHERE a = 5 IF s2 {} 3".format(table_name, operator), [False, None])
+            assert_one(session, "SELECT * FROM {} WHERE a = 5".format(table_name), [5, 5, 5, None, 5])
+
+    def test_conditional_deletes_on_static_columns_with_null_values_batch(self):
+        session = self.get_lwttester_session()
+
+        table_name = "conditional_deletes_on_static_with_null_batch"
+        session.execute("""
+            CREATE TABLE {} (a int, b int, s1 int static, s2 int static, v int, PRIMARY KEY (a, b))
+        """.format(table_name))
+
+        assert_one(session, """
+             BEGIN BATCH
+                 INSERT INTO {table_name} (a, b, s1, v) values (2, 2, 2, 2);
+                 DELETE s1 FROM {table_name} WHERE a = 2 IF s2 = null;
+             APPLY BATCH""".format(table_name=table_name), [True])
+
+        assert_one(session, "SELECT * FROM {} WHERE a = 2".format(table_name), [2, 2, None, None, 2])
+
+        for operator in [">", "<", ">=", "<=", "="]:
+            assert_one(session, """
+                BEGIN BATCH
+                    INSERT INTO {table_name} (a, b, s1, v) values (3, 3, 3, 3);
+                    DELETE s1 FROM {table_name} WHERE a = 3 IF s2 {operator} 5;
+                APPLY BATCH""".format(table_name=table_name, operator=operator), [False])
+
+            assert_none(session, "SELECT * FROM {} WHERE a = 3".format(table_name))
+
+        assert_one(session, """
+             BEGIN BATCH
+                 INSERT INTO {table_name} (a, b, s1, v) values (6, 6, 6, 6);
+                 DELETE s1 FROM {table_name} WHERE a = 6 IF s2 IN (1,2,3);
+             APPLY BATCH""".format(table_name=table_name), [False])
+
+        assert_none(session, "SELECT * FROM {} WHERE a = 6".format(table_name))
+
+        assert_one(session, """
+             BEGIN BATCH
+                 INSERT INTO {table_name} (a, b, s1, v) values (4, 4, 4, 4);
+                 DELETE s1 FROM {table_name} WHERE a = 4 IF s2 = null;
+             APPLY BATCH""".format(table_name=table_name), [True])
+
+        assert_one(session, "SELECT * FROM {} WHERE a = 4".format(table_name), [4, 4, None, None, 4])
+
+        assert_one(session, """
+            BEGIN BATCH
+                INSERT INTO {table_name} (a, b, s1, v) VALUES (5, 5, 5, 5);
+                DELETE s1 FROM {table_name} WHERE a = 5 IF s1 IN (1,2,null);
+            APPLY BATCH""".format(table_name=table_name), [True])
+
+        assert_one(session, "SELECT * FROM {} WHERE a = 5".format(table_name), [5, 5, None, None, 5])
+
+        assert_one(session, """
+            BEGIN BATCH
+                INSERT INTO {table_name} (a, b, s1, v) values (7, 7, 7, 7);
+                DELETE s1 FROM {table_name} WHERE a = 7 IF s2 != 7;
+            APPLY BATCH""".format(table_name=table_name), [True])
+
+        assert_one(session, "SELECT * FROM {} WHERE a = 7".format(table_name), [7, 7, None, None, 7])
+
+    def test_lwt_with_empty_resultset(self):
+        """
+        LWT with unset row.
+        @jira_ticket CASSANDRA-12694
+        """
+        session = self.get_lwttester_session()
+
+        session.execute("""
+            CREATE TABLE test (pk text, v1 int, v2 text, PRIMARY KEY (pk));
+        """)
+        session.execute("update test set v1 = 100 where pk = 'test1';")
+        node1 = self.cluster.nodelist()[0]
+        self.cluster.flush()
+        assert_one(session, "UPDATE test SET v1 = 100 WHERE pk = 'test1' IF v2 = null;", [True])
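
A note on the [False] vs [False, None] expectations in the LWT tests above: when a
conditional update is not applied, servers covered by _is_new_lwt_format_version also
echo the condition columns' current values next to the [applied] flag, so the expected
result row depends on the cluster version. A minimal standalone sketch of that gate
(illustrative only, not taken from the patch):

    from distutils.version import LooseVersion

    def is_new_lwt_format_version(version):
        # True for versions whose unapplied-LWT result row carries the current
        # column values alongside the [applied] flag.
        version = LooseVersion(version)
        return version > LooseVersion('3.9') or \
            (LooseVersion('3.0.9') < version < LooseVersion('3.1'))

    for v in ('3.0.9', '3.0.10', '3.9', '3.10'):
        expected = [False, None] if is_new_lwt_format_version(v) else [False]
        print(v, expected)  # 3.0.9 / 3.9 -> [False]; 3.0.10 / 3.10 -> [False, None]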




[33/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/commitlog_test.py
----------------------------------------------------------------------
diff --git a/commitlog_test.py b/commitlog_test.py
index 99c4caf..c8830cd 100644
--- a/commitlog_test.py
+++ b/commitlog_test.py
@@ -5,6 +5,8 @@ import stat
 import struct
 import time
 from distutils.version import LooseVersion
+import pytest
+import logging
 
 from cassandra import WriteTimeout
 from cassandra.cluster import NoHostAvailable, OperationTimedOut
@@ -12,28 +14,33 @@ from ccmlib.common import is_win
 from ccmlib.node import Node, TimeoutError
 from parse import parse
 
-from dtest import Tester, debug, create_ks
-from tools.assertions import assert_almost_equal, assert_none, assert_one
+from dtest import Tester, create_ks
+from tools.assertions import (assert_almost_equal, assert_none, assert_one, assert_lists_equal_ignoring_order)
 from tools.data import rows_to_list
-from tools.decorators import since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 class TestCommitLog(Tester):
     """
     CommitLog Tests
     """
-    allow_log_errors = True
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        fixture_dtest_setup.allow_log_errors = True
+
+    @pytest.fixture(scope='function', autouse=True)
+    def fixture_set_cluster_settings(self, fixture_dtest_setup):
+        fixture_dtest_setup.cluster.populate(1)
+        [self.node1] = fixture_dtest_setup.cluster.nodelist()
 
-    def setUp(self):
-        super(TestCommitLog, self).setUp()
-        self.cluster.populate(1)
-        [self.node1] = self.cluster.nodelist()
+        yield
 
-    def tearDown(self):
         # Some of the tests change commitlog permissions to provoke failure
         # so this changes them back so we can delete them.
         self._change_commitlog_perms(stat.S_IWRITE | stat.S_IREAD | stat.S_IEXEC)
-        super(TestCommitLog, self).tearDown()
+
 
     def prepare(self, configuration=None, create_test_keyspace=True, **kwargs):
         if configuration is None:
@@ -41,7 +48,7 @@ class TestCommitLog(Tester):
         default_conf = {'commitlog_sync_period_in_ms': 1000}
 
         set_conf = dict(default_conf, **configuration)
-        debug('setting commitlog configuration with the following values: '
+        logger.debug('setting commitlog configuration with the following values: '
               '{set_conf} and the following kwargs: {kwargs}'.format(
                   set_conf=set_conf, kwargs=kwargs))
         self.cluster.set_configuration_options(values=set_conf, **kwargs)
@@ -61,15 +68,15 @@ class TestCommitLog(Tester):
 
     def _change_commitlog_perms(self, mod):
         for path in self._get_commitlog_paths():
-            debug('changing permissions to {perms} on {path}'.format(perms=oct(mod), path=path))
+            logger.debug('changing permissions to {perms} on {path}'.format(perms=oct(mod), path=path))
             os.chmod(path, mod)
             commitlogs = glob.glob(path + '/*')
 
             if commitlogs:
-                debug('changing permissions to {perms} on the following files:'
+                logger.debug('changing permissions to {perms} on the following files:'
                       '\n  {files}'.format(perms=oct(mod), files='\n  '.join(commitlogs)))
             else:
-                debug(self._change_commitlog_perms.__name__ + ' called on empty commitlog directory '
+                logger.debug(self._change_commitlog_perms.__name__ + ' called on empty commitlog directory '
                       '{path} with permissions {perms}'.format(path=path, perms=oct(mod)))
 
             for commitlog in commitlogs:
@@ -108,7 +115,7 @@ class TestCommitLog(Tester):
         time.sleep(1)
 
         commitlogs = self._get_commitlog_files()
-        self.assertGreater(len(commitlogs), 0, 'No commit log files were created')
+        assert len(commitlogs) > 0, 'No commit log files were created'
 
         # the most recently-written segment of the commitlog may be smaller
         # than the expected size, so we allow exactly one segment to be smaller
@@ -116,23 +123,23 @@ class TestCommitLog(Tester):
         for i, f in enumerate(commitlogs):
             size = os.path.getsize(f)
             size_in_mb = int(size / 1024 / 1024)
-            debug('segment file {} {}; smaller already found: {}'.format(f, size_in_mb, smaller_found))
+            logger.debug('segment file {} {}; smaller already found: {}'.format(f, size_in_mb, smaller_found))
             if size_in_mb < 1 or size < (segment_size * 0.1):
-                debug('segment file not yet used; moving to next file')
+                logger.debug('segment file not yet used; moving to next file')
                 continue  # commitlog not yet used
 
             try:
                 if compressed:
                     # if compression is used, we assume there will be at most a 50% compression ratio
-                    self.assertLess(size, segment_size)
-                    self.assertGreater(size, segment_size / 2)
+                    assert size < segment_size
+                    assert size > segment_size / 2
                 else:
                     # if no compression is used, the size will be close to what we expect
                     assert_almost_equal(size, segment_size, error=0.05)
             except AssertionError as e:
                 # the last segment may be smaller
                 if not smaller_found:
-                    self.assertLessEqual(size, segment_size)
+                    assert size <= segment_size
                     smaller_found = True
                 else:
                     raise e
@@ -141,7 +148,7 @@ class TestCommitLog(Tester):
         """
         Provoke the commitlog failure
         """
-        debug('Provoking commitlog failure')
+        logger.debug('Provoking commitlog failure')
         # Test things are ok at this point
         self.session1.execute("""
             INSERT INTO test (key, col1) VALUES (1, 1);
@@ -164,17 +171,16 @@ class TestCommitLog(Tester):
         replay due to MV lock contention.  Fixed in 3.0.7 and 3.7.
         @jira_ticket CASSANDRA-11891
         """
-
         cluster_ver = self.cluster.version()
         if LooseVersion('3.1') <= cluster_ver < LooseVersion('3.7'):
-            self.skipTest("Fixed in 3.0.7 and 3.7")
+            pytest.skip("Fixed in 3.0.7 and 3.7")
 
         node1 = self.node1
         node1.set_batch_commitlog(enabled=True)
         node1.start()
         session = self.patient_cql_connection(node1)
 
-        debug("Creating schema")
+        logger.debug("Creating schema")
         create_ks(session, 'Test', 1)
         session.execute("""
             CREATE TABLE mytable (
@@ -192,37 +198,37 @@ class TestCommitLog(Tester):
             PRIMARY KEY (a, b);
         """)
 
-        debug("Insert data")
+        logger.debug("Insert data")
         num_rows = 1024  # maximum number of mutations replayed at once by the commit log
-        for i in xrange(num_rows):
+        for i in range(num_rows):
             session.execute("INSERT INTO Test.mytable (a, b, c) VALUES (0, {i}, {i})".format(i=i))
 
         node1.stop(gently=False)
         node1.mark_log_for_errors()
 
-        debug("Verify commitlog was written before abrupt stop")
+        logger.debug("Verify commitlog was written before abrupt stop")
         commitlog_files = os.listdir(os.path.join(node1.get_path(), 'commitlogs'))
-        self.assertNotEqual([], commitlog_files)
+        assert [] != commitlog_files
 
         # set a short timeout to ensure lock contention will generally exceed this
         node1.set_configuration_options({'write_request_timeout_in_ms': 30})
-        debug("Starting node again")
+        logger.debug("Starting node again")
         node1.start()
 
-        debug("Verify commit log was replayed on startup")
+        logger.debug("Verify commit log was replayed on startup")
         start_time, replay_complete = time.time(), False
         while not replay_complete:
             matches = node1.grep_log(r".*WriteTimeoutException.*")
-            self.assertEqual([], matches)
+            assert [] == matches
 
             replay_complete = node1.grep_log("Log replay complete")
-            self.assertLess(time.time() - start_time, 120, "Did not finish commitlog replay within 120 seconds")
+            assert time.time() - start_time < 120, "Did not finish commitlog replay within 120 seconds"
 
-        debug("Reconnecting to node")
+        logger.debug("Reconnecting to node")
         session = self.patient_cql_connection(node1)
-        debug("Make query to ensure data is present")
+        logger.debug("Make query to ensure data is present")
         res = list(session.execute("SELECT * FROM Test.mytable"))
-        self.assertEqual(num_rows, len(res), res)
+        assert num_rows == len(res), res
 
     def test_commitlog_replay_on_startup(self):
         """
@@ -232,7 +238,7 @@ class TestCommitLog(Tester):
         node1.set_batch_commitlog(enabled=True)
         node1.start()
 
-        debug("Insert data")
+        logger.debug("Insert data")
         session = self.patient_cql_connection(node1)
         create_ks(session, 'Test', 1)
         session.execute("""
@@ -247,69 +253,67 @@ class TestCommitLog(Tester):
         session.execute("INSERT INTO Test. users (user_name, password, gender, state, birth_year) "
                         "VALUES('gandalf', 'p@$$', 'male', 'WA', 1955);")
 
-        debug("Verify data is present")
+        logger.debug("Verify data is present")
         session = self.patient_cql_connection(node1)
         res = session.execute("SELECT * FROM Test. users")
-        self.assertItemsEqual(rows_to_list(res),
-                              [[u'gandalf', 1955, u'male', u'p@$$', u'WA']])
+        assert rows_to_list(res) == [['gandalf', 1955, 'male', 'p@$$', 'WA']]
 
-        debug("Stop node abruptly")
+        logger.debug("Stop node abruptly")
         node1.stop(gently=False)
 
-        debug("Verify commitlog was written before abrupt stop")
+        logger.debug("Verify commitlog was written before abrupt stop")
         commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
         commitlog_files = os.listdir(commitlog_dir)
-        self.assertTrue(len(commitlog_files) > 0)
+        assert len(commitlog_files) > 0
 
-        debug("Verify no SSTables were flushed before abrupt stop")
-        self.assertEqual(0, len(node1.get_sstables('test', 'users')))
+        logger.debug("Verify no SSTables were flushed before abrupt stop")
+        assert 0 == len(node1.get_sstables('test', 'users'))
 
-        debug("Verify commit log was replayed on startup")
+        logger.debug("Verify commit log was replayed on startup")
         node1.start()
         node1.watch_log_for("Log replay complete")
         # Here we verify from the logs that some mutations were replayed
         replays = [match_tuple[0] for match_tuple in node1.grep_log(" \d+ replayed mutations")]
-        debug('The following log lines indicate that mutations were replayed: {msgs}'.format(msgs=replays))
+        logger.debug('The following log lines indicate that mutations were replayed: {msgs}'.format(msgs=replays))
         num_replayed_mutations = [
             parse('{} {num_mutations:d} replayed mutations{}', line).named['num_mutations']
             for line in replays
         ]
         # assert there were some lines where more than zero mutations were replayed
-        self.assertNotEqual([m for m in num_replayed_mutations if m > 0], [])
+        assert [m for m in num_replayed_mutations if m > 0] != []
 
-        debug("Make query and ensure data is present")
+        logger.debug("Make query and ensure data is present")
         session = self.patient_cql_connection(node1)
         res = session.execute("SELECT * FROM Test. users")
-        self.assertItemsEqual(rows_to_list(res),
-                              [[u'gandalf', 1955, u'male', u'p@$$', u'WA']])
+        assert_lists_equal_ignoring_order(rows_to_list(res), [['gandalf', 1955, 'male', 'p@$$', 'WA']])
 
-    def default_segment_size_test(self):
+    def test_default_segment_size(self):
         """
         Test default commitlog_segment_size_in_mb (32MB)
         """
         self._segment_size_test(32)
 
-    def small_segment_size_test(self):
+    def test_small_segment_size(self):
         """
         Test a small commitlog_segment_size_in_mb (5MB)
         """
         self._segment_size_test(5)
 
     @since('2.2')
-    def default_compressed_segment_size_test(self):
+    def test_default_compressed_segment_size(self):
         """
         Test default compressed commitlog_segment_size_in_mb (32MB)
         """
         self._segment_size_test(32, compressed=True)
 
     @since('2.2')
-    def small_compressed_segment_size_test(self):
+    def test_small_compressed_segment_size(self):
         """
         Test a small compressed commitlog_segment_size_in_mb (5MB)
         """
         self._segment_size_test(5, compressed=True)
 
-    def stop_failure_policy_test(self):
+    def test_stop_failure_policy(self):
         """
         Test the stop commitlog failure policy (default one)
         """
@@ -317,23 +321,23 @@ class TestCommitLog(Tester):
 
         self._provoke_commitlog_failure()
         failure = self.node1.grep_log("Failed .+ commit log segments. Commit disk failure policy is stop; terminating thread")
-        debug(failure)
-        self.assertTrue(failure, "Cannot find the commitlog failure message in logs")
-        self.assertTrue(self.node1.is_running(), "Node1 should still be running")
+        logger.debug(failure)
+        assert failure, "Cannot find the commitlog failure message in logs"
+        assert self.node1.is_running(), "Node1 should still be running"
 
         # Cannot write anymore after the failure
-        with self.assertRaises(NoHostAvailable):
+        with pytest.raises(NoHostAvailable):
             self.session1.execute("""
               INSERT INTO test (key, col1) VALUES (2, 2);
             """)
 
         # Should not be able to read neither
-        with self.assertRaises(NoHostAvailable):
+        with pytest.raises(NoHostAvailable):
             self.session1.execute("""
               "SELECT * FROM test;"
             """)
 
-    def stop_commit_failure_policy_test(self):
+    def test_stop_commit_failure_policy(self):
         """
         Test the stop_commit commitlog failure policy
         """
@@ -347,26 +351,26 @@ class TestCommitLog(Tester):
 
         self._provoke_commitlog_failure()
         failure = self.node1.grep_log("Failed .+ commit log segments. Commit disk failure policy is stop_commit; terminating thread")
-        debug(failure)
-        self.assertTrue(failure, "Cannot find the commitlog failure message in logs")
-        self.assertTrue(self.node1.is_running(), "Node1 should still be running")
+        logger.debug(failure)
+        assert failure, "Cannot find the commitlog failure message in logs"
+        assert self.node1.is_running(), "Node1 should still be running"
 
         # Cannot write anymore after the failure
-        debug('attempting to insert to node with failing commitlog; should fail')
-        with self.assertRaises((OperationTimedOut, WriteTimeout)):
+        logger.debug('attempting to insert to node with failing commitlog; should fail')
+        with pytest.raises((OperationTimedOut, WriteTimeout)):
             self.session1.execute("""
               INSERT INTO test (key, col1) VALUES (2, 2);
             """)
 
         # Should be able to read
-        debug('attempting to read from node with failing commitlog; should succeed')
+        logger.debug('attempting to read from node with failing commitlog; should succeed')
         assert_one(
             self.session1,
             "SELECT * FROM test where key=2;",
             [2, 2]
         )
 
-    def die_failure_policy_test(self):
+    def test_die_failure_policy(self):
         """
         Test the die commitlog failure policy
         """
@@ -376,11 +380,11 @@ class TestCommitLog(Tester):
 
         self._provoke_commitlog_failure()
         failure = self.node1.grep_log("ERROR \[COMMIT-LOG-ALLOCATOR\].+JVM state determined to be unstable.  Exiting forcefully")
-        debug(failure)
-        self.assertTrue(failure, "Cannot find the commitlog failure message in logs")
-        self.assertFalse(self.node1.is_running(), "Node1 should not be running")
+        logger.debug(failure)
+        assert failure, "Cannot find the commitlog failure message in logs"
+        assert not self.node1.is_running(), "Node1 should not be running"
 
-    def ignore_failure_policy_test(self):
+    def test_ignore_failure_policy(self):
         """
         Test the ignore commitlog failure policy
         """
@@ -390,8 +394,8 @@ class TestCommitLog(Tester):
 
         self._provoke_commitlog_failure()
         failure = self.node1.grep_log("ERROR \[COMMIT-LOG-ALLOCATOR\].+Failed .+ commit log segments")
-        self.assertTrue(failure, "Cannot find the commitlog failure message in logs")
-        self.assertTrue(self.node1.is_running(), "Node1 should still be running")
+        assert failure, "Cannot find the commitlog failure message in logs"
+        assert self.node1.is_running(), "Node1 should still be running"
 
         # on Windows, we can't delete the segments if they're chmod to 0 so they'll still be available for use by CLSM,
         # and we can still create new segments since os.chmod is limited to stat.S_IWRITE and stat.S_IREAD to set files
@@ -401,10 +405,10 @@ class TestCommitLog(Tester):
         if is_win():
             # We expect this to succeed
             self.session1.execute(query)
-            self.assertFalse(self.node1.grep_log("terminating thread"), "thread was terminated but CL error should have been ignored.")
-            self.assertTrue(self.node1.is_running(), "Node1 should still be running after an ignore error on CL")
+            assert not self.node1.grep_log("terminating thread"), "thread was terminated but CL error should have been ignored."
+            assert self.node1.is_running(), "Node1 should still be running after an ignore error on CL"
         else:
-            with self.assertRaises((OperationTimedOut, WriteTimeout)):
+            with pytest.raises((OperationTimedOut, WriteTimeout)):
                 self.session1.execute(query)
 
             # Should not exist
@@ -436,13 +440,11 @@ class TestCommitLog(Tester):
         and the commit_failure_policy is stop, C* shouldn't startup
         @jira_ticket CASSANDRA-9749
         """
-        if not hasattr(self, 'ignore_log_patterns'):
-            self.ignore_log_patterns = []
-
         expected_error = "Exiting due to error while processing commit log during initialization."
-        self.ignore_log_patterns.append(expected_error)
+        self.fixture_dtest_setup.ignore_log_patterns = list(self.fixture_dtest_setup.ignore_log_patterns) + [
+            expected_error]
         node = self.node1
-        self.assertIsInstance(node, Node)
+        assert isinstance(node, Node)
         node.set_configuration_options({'commit_failure_policy': 'stop', 'commitlog_sync_period_in_ms': 1000})
         self.cluster.start()
 
@@ -454,7 +456,7 @@ class TestCommitLog(Tester):
             cursor.execute("INSERT INTO ks.tbl (k, v) VALUES ({0}, {0})".format(i))
 
         results = list(cursor.execute("SELECT * FROM ks.tbl"))
-        self.assertEqual(len(results), 10)
+        assert len(results) == 10
 
         # with the commitlog_sync_period_in_ms set to 1000,
         # this sleep guarantees that the commitlog data is
@@ -469,14 +471,14 @@ class TestCommitLog(Tester):
             ks_dir = os.path.join(data_dir, 'ks')
             db_dir = os.listdir(ks_dir)[0]
             sstables = len([f for f in os.listdir(os.path.join(ks_dir, db_dir)) if f.endswith('.db')])
-            self.assertEqual(sstables, 0)
+            assert sstables == 0
 
         # modify the commit log crc values
         cl_dir = os.path.join(path, 'commitlogs')
-        self.assertTrue(len(os.listdir(cl_dir)) > 0)
+        assert len(os.listdir(cl_dir)) > 0
         for cl in os.listdir(cl_dir):
             # locate the CRC location
-            with open(os.path.join(cl_dir, cl), 'r') as f:
+            with open(os.path.join(cl_dir, cl), 'rb') as f:
                 f.seek(0)
                 version = struct.unpack('>i', f.read(4))[0]
                 crc_pos = 12
@@ -486,22 +488,22 @@ class TestCommitLog(Tester):
                     crc_pos += 2 + psize
 
             # rewrite it with crap
-            with open(os.path.join(cl_dir, cl), 'w') as f:
+            with open(os.path.join(cl_dir, cl), 'wb') as f:
                 f.seek(crc_pos)
                 f.write(struct.pack('>i', 123456))
 
             # verify said crap
-            with open(os.path.join(cl_dir, cl), 'r') as f:
+            with open(os.path.join(cl_dir, cl), 'rb') as f:
                 f.seek(crc_pos)
                 crc = struct.unpack('>i', f.read(4))[0]
-                self.assertEqual(crc, 123456)
+                assert crc == 123456
 
         mark = node.mark_log()
         node.start()
         node.watch_log_for(expected_error, from_mark=mark)
-        with self.assertRaises(TimeoutError):
+        with pytest.raises(TimeoutError):
             node.wait_for_binary_interface(from_mark=mark, timeout=20)
-        self.assertFalse(node.is_running())
+        assert not node.is_running()
 
     @since('2.2')
     def test_compression_error(self):
@@ -510,13 +512,11 @@ class TestCommitLog(Tester):
         if the commit log header refers to an unknown compression class, and
         the commit_failure_policy is stop, C* shouldn't start up
         """
-        if not hasattr(self, 'ignore_log_patterns'):
-            self.ignore_log_patterns = []
-
         expected_error = 'Could not create Compression for type org.apache.cassandra.io.compress.LZ5Compressor'
-        self.ignore_log_patterns.append(expected_error)
+        self.fixture_dtest_setup.ignore_log_patterns = list(self.fixture_dtest_setup.ignore_log_patterns) + [
+            expected_error]
         node = self.node1
-        self.assertIsInstance(node, Node)
+        assert isinstance(node, Node)
         node.set_configuration_options({'commit_failure_policy': 'stop',
                                         'commitlog_compression': [{'class_name': 'LZ4Compressor'}],
                                         'commitlog_sync_period_in_ms': 1000})
@@ -530,7 +530,7 @@ class TestCommitLog(Tester):
             cursor.execute("INSERT INTO ks1.tbl (k, v) VALUES ({0}, {0})".format(i))
 
         results = list(cursor.execute("SELECT * FROM ks1.tbl"))
-        self.assertEqual(len(results), 10)
+        assert len(results) == 10
 
         # with the commitlog_sync_period_in_ms set to 1000,
         # this sleep guarantees that the commitlog data is
@@ -545,31 +545,37 @@ class TestCommitLog(Tester):
             ks_dir = os.path.join(data_dir, 'ks1')
             db_dir = os.listdir(ks_dir)[0]
             sstables = sstables + len([f for f in os.listdir(os.path.join(ks_dir, db_dir)) if f.endswith('.db')])
-        self.assertEqual(sstables, 0)
+        assert sstables == 0
 
         def get_header_crc(header):
             """
             When calculating the header crc, C* splits up the 8b id, first adding the 4 least significant
             bytes to the crc, then the 4 most significant bytes, so this splits them and calculates the same way
             """
-            new_header = header[:4]
+            new_header = bytearray(header[:4])
             # C* evaluates most and least significant 4 bytes out of order
-            new_header += header[8:12]
-            new_header += header[4:8]
+            new_header.extend(header[8:12])
+            new_header.extend(header[4:8])
             # C* evaluates the short parameter length as an int
-            new_header += '\x00\x00' + header[12:14]  # the
-            new_header += header[14:]
-            return binascii.crc32(new_header)
+            new_header.extend(b'\x00\x00')
+            new_header.extend(header[12:14])  # the 2-byte parameter length
+            new_header.extend(header[14:])
+
+            # https://docs.python.org/2/library/binascii.html
+            # "Changed in version 2.6: The return value is in the range [-2**31, 2**31-1] regardless
+            # of platform. In the past the value would be signed on some platforms and unsigned on
+            # others. Use & 0xffffffff on the value if you want it to match Python 3 behavior."
+            return binascii.crc32(new_header) & 0xffffffff
 
         # modify the compression parameters to look for a compressor that isn't there
         # while this scenario is pretty unlikely, if a jar or lib got moved or something,
         # you'd have a similar situation, which would be fixable by the user
         path = node.get_path()
         cl_dir = os.path.join(path, 'commitlogs')
-        self.assertTrue(len(os.listdir(cl_dir)) > 0)
+        assert len(os.listdir(cl_dir)) > 0
         for cl in os.listdir(cl_dir):
             # read the header and find the crc location
-            with open(os.path.join(cl_dir, cl), 'r') as f:
+            with open(os.path.join(cl_dir, cl), 'rb') as f:
                 f.seek(0)
                 crc_pos = 12
                 f.seek(crc_pos)
@@ -583,29 +589,39 @@ class TestCommitLog(Tester):
                 # check that we're going this right
                 f.seek(0)
                 header_bytes = f.read(header_length)
-                self.assertEqual(get_header_crc(header_bytes), crc)
+
+                # https://docs.python.org/2/library/binascii.html
+                # "Changed in version 2.6: The return value is in the range [-2**31, 2**31-1] regardless
+                # of platform. In the past the value would be signed on some platforms and unsigned on
+                # others. Use & 0xffffffff on the value if you want it to match Python 3 behavior."
+                assert get_header_crc(header_bytes) == (crc & 0xffffffff)
 
             # rewrite it with imaginary compressor
-            self.assertIn('LZ4Compressor', header_bytes)
-            header_bytes = header_bytes.replace('LZ4Compressor', 'LZ5Compressor')
-            self.assertNotIn('LZ4Compressor', header_bytes)
-            self.assertIn('LZ5Compressor', header_bytes)
-            with open(os.path.join(cl_dir, cl), 'w') as f:
+            assert 'LZ4Compressor'.encode("ascii") in header_bytes
+            header_bytes = header_bytes.replace('LZ4Compressor'.encode("ascii"), 'LZ5Compressor'.encode("ascii"))
+            assert 'LZ4Compressor'.encode("ascii") not in header_bytes
+            assert 'LZ5Compressor'.encode("ascii") in header_bytes
+            with open(os.path.join(cl_dir, cl), 'wb') as f:
                 f.seek(0)
                 f.write(header_bytes)
                 f.seek(crc_pos)
-                f.write(struct.pack('>i', get_header_crc(header_bytes)))
+                f.write(struct.pack('>I', get_header_crc(header_bytes)))
 
             # verify we wrote everything correctly
-            with open(os.path.join(cl_dir, cl), 'r') as f:
+            with open(os.path.join(cl_dir, cl), 'rb') as f:
                 f.seek(0)
-                self.assertEqual(f.read(header_length), header_bytes)
+                assert f.read(header_length) == header_bytes
                 f.seek(crc_pos)
                 crc = struct.unpack('>i', f.read(4))[0]
-                self.assertEqual(crc, get_header_crc(header_bytes))
+
+                # https://docs.python.org/2/library/binascii.html
+                # "Changed in version 2.6: The return value is in the range [-2**31, 2**31-1] regardless
+                # of platform. In the past the value would be signed on some platforms and unsigned on
+                # others. Use & 0xffffffff on the value if you want it to match Python 3 behavior."
+                assert (crc & 0xffffffff) == get_header_crc(header_bytes)
 
         mark = node.mark_log()
         node.start()
         node.watch_log_for(expected_error, from_mark=mark)
-        with self.assertRaises(TimeoutError):
+        with pytest.raises(TimeoutError):
             node.wait_for_binary_interface(from_mark=mark, timeout=20)
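
The binascii comments repeated in the hunk above reduce to a signedness mismatch:
struct's '>i' format reads the on-disk big-endian CRC as a signed 32-bit int, while
binascii.crc32() in Python 3 always returns an unsigned value, hence the masking with
& 0xffffffff before comparing. A small standard-library-only illustration (a sketch,
not taken from the patch):

    import binascii
    import struct

    crc = binascii.crc32(b'commit log header bytes')  # Python 3: always unsigned
    # Round-trip through a signed read, as the test does with struct.unpack('>i'):
    signed = struct.unpack('>i', struct.pack('>I', crc))[0]  # may come back negative
    assert (signed & 0xffffffff) == crc  # masking recovers the unsigned CRC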

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/compaction_test.py
----------------------------------------------------------------------
diff --git a/compaction_test.py b/compaction_test.py
index 999f83c..49a2923 100644
--- a/compaction_test.py
+++ b/compaction_test.py
@@ -5,25 +5,29 @@ import string
 import tempfile
 import time
 from distutils.version import LooseVersion
-
+import pytest
 import parse
+import logging
 
-from dtest import Tester, debug, create_ks
+from dtest import Tester, create_ks
 from tools.assertions import assert_length_equal, assert_none, assert_one
-from tools.decorators import since
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
-class TestCompaction(Tester):
+strategies = ['LeveledCompactionStrategy', 'SizeTieredCompactionStrategy', 'DateTieredCompactionStrategy']
 
-    __test__ = False
 
-    def setUp(self):
-        Tester.setUp(self)
+class TestCompaction(Tester):
+
+    @pytest.fixture(scope='function', autouse=True)
+    def fixture_set_cluster_log_level(self, fixture_dtest_setup):
         # compaction test for version 2.2.2 and above relies on DEBUG log in debug.log
-        self.cluster.set_log_level("DEBUG")
+        fixture_dtest_setup.cluster.set_log_level("DEBUG")
 
-    @since('0', '2.2.X')
-    def compaction_delete_test(self):
+    @pytest.mark.parametrize("strategy", strategies)
+    @since('0', max_version='2.2.X')
+    def test_compaction_delete(self, strategy):
         """
         Test that executing a delete properly tombstones a row.
         Insert data, delete a partition of data and check that the requisite rows are tombstoned.
@@ -35,7 +39,7 @@ class TestCompaction(Tester):
         session = self.patient_cql_connection(node1)
         create_ks(session, 'ks', 1)
 
-        session.execute("create table ks.cf (key int PRIMARY KEY, val int) with compaction = {'class':'" + self.strategy + "'} and gc_grace_seconds = 30;")
+        session.execute("create table ks.cf (key int PRIMARY KEY, val int) with compaction = {'class':'" + strategy + "'} and gc_grace_seconds = 30;")
 
         for x in range(0, 100):
             session.execute('insert into cf (key, val) values (' + str(x) + ',1)')
@@ -58,9 +62,9 @@ class TestCompaction(Tester):
 
         numfound = jsoninfo.count("markedForDeleteAt")
 
-        self.assertEqual(numfound, 10)
+        assert numfound == 10
 
-    def data_size_test(self):
+    def test_data_size(self):
         """
         Ensure that data size does not have unwarranted increases after compaction.
         Insert data and check data size before and after a compaction.
@@ -80,8 +84,8 @@ class TestCompaction(Tester):
             output = output[output.find("Space used (live)"):]
             initialValue = int(output[output.find(":") + 1:output.find("\n")].strip())
         else:
-            debug("datasize not found")
-            debug(output)
+            logger.debug("datasize not found")
+            logger.debug(output)
 
         node1.flush()
         node1.compact()
@@ -93,31 +97,32 @@ class TestCompaction(Tester):
             output = output[output.find("Space used (live)"):]
             finalValue = int(output[output.find(":") + 1:output.find("\n")].strip())
         else:
-            debug("datasize not found")
+            logger.debug("datasize not found")
         # allow 5% size increase - if we have few sstables it is not impossible that live size increases *slightly* after compaction
-        self.assertLess(finalValue, initialValue * 1.05)
+        assert finalValue < initialValue * 1.05
 
-    def bloomfilter_size_test(self):
+    @pytest.mark.parametrize("strategy", strategies)
+    def test_bloomfilter_size(self, strategy):
         """
         @jira_ticket CASSANDRA-11344
         Check that bloom filter size is between 50KB and 100KB for 100K keys
         """
-        if not hasattr(self, 'strategy') or self.strategy == "LeveledCompactionStrategy":
+        if strategy == "LeveledCompactionStrategy":
             strategy_string = 'strategy=LeveledCompactionStrategy,sstable_size_in_mb=1'
             min_bf_size = 40000
             max_bf_size = 100000
         else:
-            if self.strategy == "DateTieredCompactionStrategy":
+            if strategy == "DateTieredCompactionStrategy":
                 strategy_string = "strategy=DateTieredCompactionStrategy,base_time_seconds=86400"  # we want a single sstable, so make sure we don't have a tiny first window
             else:
-                strategy_string = "strategy={}".format(self.strategy)
+                strategy_string = "strategy={}".format(strategy)
             min_bf_size = 100000
             max_bf_size = 150000
         cluster = self.cluster
         cluster.populate(1).start(wait_for_binary_proto=True)
         [node1] = cluster.nodelist()
 
-        for x in xrange(0, 5):
+        for x in range(0, 5):
             node1.stress(['write', 'n=100K', "no-warmup", "cl=ONE", "-rate",
                           "threads=300", "-schema", "replication(factor=1)",
                           "compaction({},enabled=false)".format(strategy_string)])
@@ -134,36 +139,37 @@ class TestCompaction(Tester):
 
         # in some rare cases we can end up with more than one sstable per data directory with
         # non-lcs strategies (see CASSANDRA-12323)
-        if not hasattr(self, 'strategy') or self.strategy == "LeveledCompactionStrategy":
+        if strategy == "LeveledCompactionStrategy":
             size_factor = 1
         else:
             sstable_count = len(node1.get_sstables('keyspace1', 'standard1'))
             dir_count = len(node1.data_directories())
-            debug("sstable_count is: {}".format(sstable_count))
-            debug("dir_count is: {}".format(dir_count))
+            logger.debug("sstable_count is: {}".format(sstable_count))
+            logger.debug("dir_count is: {}".format(dir_count))
             if node1.get_cassandra_version() < LooseVersion('3.2'):
                 size_factor = sstable_count
             else:
                 size_factor = sstable_count / float(dir_count)
 
-        debug("bloom filter size is: {}".format(bfSize))
-        debug("size factor = {}".format(size_factor))
-        self.assertGreaterEqual(bfSize, size_factor * min_bf_size)
-        self.assertLessEqual(bfSize, size_factor * max_bf_size)
+        logger.debug("bloom filter size is: {}".format(bfSize))
+        logger.debug("size factor = {}".format(size_factor))
+        assert bfSize >= size_factor * min_bf_size
+        assert bfSize <= size_factor * max_bf_size
 
-    def sstable_deletion_test(self):
+    @pytest.mark.parametrize("strategy", strategies)
+    def test_sstable_deletion(self, strategy):
         """
         Test that sstables are deleted properly when able after compaction.
         Insert data setting gc_grace_seconds to 0, and determine sstable
         is deleted upon data deletion.
         """
-        self.skip_if_no_major_compaction()
+        self.skip_if_no_major_compaction(strategy)
         cluster = self.cluster
         cluster.populate(1).start(wait_for_binary_proto=True)
         [node1] = cluster.nodelist()
         session = self.patient_cql_connection(node1)
         create_ks(session, 'ks', 1)
-        session.execute("create table cf (key int PRIMARY KEY, val int) with gc_grace_seconds = 0 and compaction= {'class':'" + self.strategy + "'}")
+        session.execute("create table cf (key int PRIMARY KEY, val int) with gc_grace_seconds = 0 and compaction= {'class':'" + strategy + "'}")
 
         for x in range(0, 100):
             session.execute('insert into cf (key, val) values (' + str(x) + ',1)')
@@ -180,22 +186,21 @@ class TestCompaction(Tester):
                 cfs = os.listdir(os.path.join(data_dir, "ks"))
                 ssdir = os.listdir(os.path.join(data_dir, "ks", cfs[0]))
                 for afile in ssdir:
-                    self.assertFalse("Data" in afile, afile)
+                    assert "Data" not in afile, afile
 
         except OSError:
             self.fail("Path to sstables not valid.")
 
-    def dtcs_deletion_test(self):
+    @pytest.mark.parametrize("strategy", ['DateTieredCompactionStrategy'])
+    def test_dtcs_deletion(self, strategy):
         """
         Test that sstables are deleted properly when able after compaction with
         DateTieredCompactionStrategy.
         Insert data setting max_sstable_age_days low, and determine sstable
         is deleted upon data deletion past max_sstable_age_days.
         """
-        if not hasattr(self, 'strategy'):
-            self.strategy = 'DateTieredCompactionStrategy'
-        elif self.strategy != 'DateTieredCompactionStrategy':
-            self.skipTest('Not implemented unless DateTieredCompactionStrategy is used')
+        if strategy != 'DateTieredCompactionStrategy':
+            pytest.skip('Not implemented unless DateTieredCompactionStrategy is used')
 
         cluster = self.cluster
         cluster.populate(1).start(wait_for_binary_proto=True)
@@ -215,7 +220,7 @@ class TestCompaction(Tester):
         expected_sstable_count = 1
         if self.cluster.version() > LooseVersion('3.1'):
             expected_sstable_count = cluster.data_dir_count
-        self.assertEqual(len(expired_sstables), expected_sstable_count)
+        assert len(expired_sstables) == expected_sstable_count
         # write a new sstable to make DTCS check for expired sstables:
         for x in range(0, 100):
             session.execute('insert into cf (key, val) values ({}, {})'.format(x, x))
@@ -223,7 +228,7 @@ class TestCompaction(Tester):
         time.sleep(5)
         # we only check every 10 minutes - sstable should still be there:
         for expired_sstable in expired_sstables:
-            self.assertIn(expired_sstable, node1.get_sstables('ks', 'cf'))
+            assert expired_sstable in node1.get_sstables('ks', 'cf')
 
         session.execute("alter table cf with compaction =  {'class':'DateTieredCompactionStrategy', 'max_sstable_age_days':0.00035, 'min_threshold':2, 'expired_sstable_check_frequency_seconds':0}")
         time.sleep(1)
@@ -232,9 +237,9 @@ class TestCompaction(Tester):
         node1.flush()
         time.sleep(5)
         for expired_sstable in expired_sstables:
-            self.assertNotIn(expired_sstable, node1.get_sstables('ks', 'cf'))
+            assert expired_sstable not in node1.get_sstables('ks', 'cf')
 
-    def compaction_throughput_test(self):
+    def test_compaction_throughput(self):
         """
         Test setting compaction throughput.
         Set throughput, insert data and ensure compaction performance corresponds.
@@ -277,24 +282,26 @@ class TestCompaction(Tester):
         }
 
         units = ['MB'] if cluster.version() < LooseVersion('3.6') else ['KiB', 'MiB', 'GiB']
-        self.assertIn(found_units, units)
+        assert found_units in units
 
-        debug(avgthroughput)
+        logger.debug(avgthroughput)
         avgthroughput_mb = unit_conversion_dct[found_units] * float(avgthroughput)
 
         # The throughput in the log is computed independently from the throttling and on the output files while
         # throttling is on the input files, so while that throughput shouldn't be higher than the one set in
         # principle, a bit of wiggle room is expected
-        self.assertGreaterEqual(float(threshold) + 0.5, avgthroughput_mb)
+        assert float(threshold) + 0.5 >= avgthroughput_mb
 
-    def compaction_strategy_switching_test(self):
-        """Ensure that switching strategies does not result in problems.
+    @pytest.mark.parametrize("strategy", strategies)
+    def test_compaction_strategy_switching(self, strategy):
+        """
+        Ensure that switching strategies does not result in problems.
         Insert data, switch strategies, then check against data loss.
         """
         strategies = ['LeveledCompactionStrategy', 'SizeTieredCompactionStrategy', 'DateTieredCompactionStrategy']
 
-        if self.strategy in strategies:
-            strategies.remove(self.strategy)
+        if strategy in strategies:
+            strategies.remove(strategy)
             cluster = self.cluster
             cluster.populate(1).start(wait_for_binary_proto=True)
             [node1] = cluster.nodelist()
@@ -303,7 +310,7 @@ class TestCompaction(Tester):
                 session = self.patient_cql_connection(node1)
                 create_ks(session, 'ks', 1)
 
-                session.execute("create table ks.cf (key int PRIMARY KEY, val int) with gc_grace_seconds = 0 and compaction= {'class':'" + self.strategy + "'};")
+                session.execute("create table ks.cf (key int PRIMARY KEY, val int) with gc_grace_seconds = 0 and compaction= {'class':'" + strategy + "'};")
 
                 for x in range(0, 100):
                     session.execute('insert into ks.cf (key, val) values (' + str(x) + ',1)')
@@ -326,7 +333,7 @@ class TestCompaction(Tester):
                 time.sleep(5)
                 cluster.start(wait_for_binary_proto=True)
 
-    def large_compaction_warning_test(self):
+    def test_large_compaction_warning(self):
         """
         @jira_ticket CASSANDRA-9643
         Check that we log a warning when the partition size is bigger than compaction_large_partition_warning_threshold_mb
@@ -347,7 +354,7 @@ class TestCompaction(Tester):
 
         ret = list(session.execute("SELECT properties from ks.large where userid = 'user'"))
         assert_length_equal(ret, 1)
-        self.assertEqual(200, len(ret[0][0].keys()))
+        assert 200 == len(list(ret[0][0].keys()))
 
         node.flush()
 
@@ -358,9 +365,10 @@ class TestCompaction(Tester):
 
         ret = list(session.execute("SELECT properties from ks.large where userid = 'user'"))
         assert_length_equal(ret, 1)
-        self.assertEqual(200, len(ret[0][0].keys()))
+        assert 200 == len(list(ret[0][0].keys()))
 
-    def disable_autocompaction_nodetool_test(self):
+    @pytest.mark.parametrize("strategy", strategies)
+    def test_disable_autocompaction_nodetool(self, strategy):
         """
         Make sure we can enable/disable compaction using nodetool
         """
@@ -369,7 +377,7 @@ class TestCompaction(Tester):
         [node] = cluster.nodelist()
         session = self.patient_cql_connection(node)
         create_ks(session, 'ks', 1)
-        session.execute('CREATE TABLE to_disable (id int PRIMARY KEY, d TEXT) WITH compaction = {{\'class\':\'{0}\'}}'.format(self.strategy))
+        session.execute('CREATE TABLE to_disable (id int PRIMARY KEY, d TEXT) WITH compaction = {{\'class\':\'{0}\'}}'.format(strategy))
         node.nodetool('disableautocompaction ks to_disable')
         for i in range(1000):
             session.execute('insert into to_disable (id, d) values ({0}, \'{1}\')'.format(i, 'hello' * 100))
@@ -379,13 +387,14 @@ class TestCompaction(Tester):
             log_file = 'system.log'
         else:
             log_file = 'debug.log'
-        self.assertTrue(len(node.grep_log('Compacting.+to_disable', filename=log_file)) == 0, 'Found compaction log items for {0}'.format(self.strategy))
+        assert len(node.grep_log('Compacting.+to_disable', filename=log_file)) == 0, 'Found compaction log items for {0}'.format(strategy)
         node.nodetool('enableautocompaction ks to_disable')
         # sleep to allow compactions to start
         time.sleep(2)
-        self.assertTrue(len(node.grep_log('Compacting.+to_disable', filename=log_file)) > 0, 'Found no log items for {0}'.format(self.strategy))
+        assert len(node.grep_log('Compacting.+to_disable', filename=log_file)) > 0, 'Found no log items for {0}'.format(strategy)
 
-    def disable_autocompaction_schema_test(self):
+    @pytest.mark.parametrize("strategy", strategies)
+    def test_disable_autocompaction_schema(self, strategy):
         """
         Make sure we can disable compaction via the schema compaction parameter 'enabled' = false
         """
@@ -394,7 +403,7 @@ class TestCompaction(Tester):
         [node] = cluster.nodelist()
         session = self.patient_cql_connection(node)
         create_ks(session, 'ks', 1)
-        session.execute('CREATE TABLE to_disable (id int PRIMARY KEY, d TEXT) WITH compaction = {{\'class\':\'{0}\', \'enabled\':\'false\'}}'.format(self.strategy))
+        session.execute('CREATE TABLE to_disable (id int PRIMARY KEY, d TEXT) WITH compaction = {{\'class\':\'{0}\', \'enabled\':\'false\'}}'.format(strategy))
         for i in range(1000):
             session.execute('insert into to_disable (id, d) values ({0}, \'{1}\')'.format(i, 'hello' * 100))
             if i % 100 == 0:
@@ -404,7 +413,7 @@ class TestCompaction(Tester):
         else:
             log_file = 'debug.log'
 
-        self.assertTrue(len(node.grep_log('Compacting.+to_disable', filename=log_file)) == 0, 'Found compaction log items for {0}'.format(self.strategy))
+        assert len(node.grep_log('Compacting.+to_disable', filename=log_file)) == 0, 'Found compaction log items for {0}'.format(strategy)
         # should still be disabled after restart:
         node.stop()
         node.start(wait_for_binary_proto=True)
@@ -412,13 +421,14 @@ class TestCompaction(Tester):
         session.execute("use ks")
         # sleep to make sure we don't start any logs
         time.sleep(2)
-        self.assertTrue(len(node.grep_log('Compacting.+to_disable', filename=log_file)) == 0, 'Found compaction log items for {0}'.format(self.strategy))
+        assert len(node.grep_log('Compacting.+to_disable', filename=log_file)) == 0, 'Found compaction log items for {0}'.format(strategy)
         node.nodetool('enableautocompaction ks to_disable')
         # sleep to allow compactions to start
         time.sleep(2)
-        self.assertTrue(len(node.grep_log('Compacting.+to_disable', filename=log_file)) > 0, 'Found no log items for {0}'.format(self.strategy))
+        assert len(node.grep_log('Compacting.+to_disable', filename=log_file)) > 0, 'Found no log items for {0}'.format(strategy)
 
-    def disable_autocompaction_alter_test(self):
+    @pytest.mark.parametrize("strategy", strategies)
+    def test_disable_autocompaction_alter(self, strategy):
         """
         Make sure we can enable compaction using an alter-statement
         """
@@ -427,8 +437,8 @@ class TestCompaction(Tester):
         [node] = cluster.nodelist()
         session = self.patient_cql_connection(node)
         create_ks(session, 'ks', 1)
-        session.execute('CREATE TABLE to_disable (id int PRIMARY KEY, d TEXT) WITH compaction = {{\'class\':\'{0}\'}}'.format(self.strategy))
-        session.execute('ALTER TABLE to_disable WITH compaction = {{\'class\':\'{0}\', \'enabled\':\'false\'}}'.format(self.strategy))
+        session.execute('CREATE TABLE to_disable (id int PRIMARY KEY, d TEXT) WITH compaction = {{\'class\':\'{0}\'}}'.format(strategy))
+        session.execute('ALTER TABLE to_disable WITH compaction = {{\'class\':\'{0}\', \'enabled\':\'false\'}}'.format(strategy))
         for i in range(1000):
             session.execute('insert into to_disable (id, d) values ({0}, \'{1}\')'.format(i, 'hello' * 100))
             if i % 100 == 0:
@@ -437,16 +447,17 @@ class TestCompaction(Tester):
             log_file = 'system.log'
         else:
             log_file = 'debug.log'
-        self.assertTrue(len(node.grep_log('Compacting.+to_disable', filename=log_file)) == 0, 'Found compaction log items for {0}'.format(self.strategy))
-        session.execute('ALTER TABLE to_disable WITH compaction = {{\'class\':\'{0}\', \'enabled\':\'true\'}}'.format(self.strategy))
+        assert len(node.grep_log('Compacting.+to_disable', filename=log_file)) == 0, 'Found compaction log items for {0}'.format(strategy)
+        session.execute('ALTER TABLE to_disable WITH compaction = {{\'class\':\'{0}\', \'enabled\':\'true\'}}'.format(strategy))
         # we need to flush at least once when altering to enable:
         session.execute('insert into to_disable (id, d) values (99, \'hello\')')
         node.flush()
         # sleep to allow compactions to start
         time.sleep(2)
-        self.assertTrue(len(node.grep_log('Compacting.+to_disable', filename=log_file)) > 0, 'Found no log items for {0}'.format(self.strategy))
+        assert len(node.grep_log('Compacting.+to_disable', filename=log_file)) > 0, 'Found no log items for {0}'.format(strategy)
 
-    def disable_autocompaction_alter_and_nodetool_test(self):
+    @pytest.mark.parametrize("strategy", strategies)
+    def test_disable_autocompaction_alter_and_nodetool(self, strategy):
         """
         Make sure compaction stays disabled after an alter statement where we have disabled using nodetool first
         """
@@ -455,7 +466,7 @@ class TestCompaction(Tester):
         [node] = cluster.nodelist()
         session = self.patient_cql_connection(node)
         create_ks(session, 'ks', 1)
-        session.execute('CREATE TABLE to_disable (id int PRIMARY KEY, d TEXT) WITH compaction = {{\'class\':\'{0}\'}}'.format(self.strategy))
+        session.execute('CREATE TABLE to_disable (id int PRIMARY KEY, d TEXT) WITH compaction = {{\'class\':\'{0}\'}}'.format(strategy))
         node.nodetool('disableautocompaction ks to_disable')
         for i in range(1000):
             session.execute('insert into to_disable (id, d) values ({0}, \'{1}\')'.format(i, 'hello' * 100))
@@ -465,19 +476,19 @@ class TestCompaction(Tester):
             log_file = 'system.log'
         else:
             log_file = 'debug.log'
-        self.assertTrue(len(node.grep_log('Compacting.+to_disable', filename=log_file)) == 0, 'Found compaction log items for {0}'.format(self.strategy))
-        session.execute('ALTER TABLE to_disable WITH compaction = {{\'class\':\'{0}\', \'tombstone_threshold\':0.9}}'.format(self.strategy))
+        assert len(node.grep_log('Compacting.+to_disable', filename=log_file)) == 0, 'Found compaction log items for {0}'.format(strategy)
+        session.execute('ALTER TABLE to_disable WITH compaction = {{\'class\':\'{0}\', \'tombstone_threshold\':0.9}}'.format(strategy))
         session.execute('insert into to_disable (id, d) values (99, \'hello\')')
         node.flush()
         time.sleep(2)
-        self.assertTrue(len(node.grep_log('Compacting.+to_disable', filename=log_file)) == 0, 'Found log items for {0}'.format(self.strategy))
+        assert len(node.grep_log('Compacting.+to_disable', filename=log_file)) == 0, 'Found log items for {0}'.format(strategy)
         node.nodetool('enableautocompaction ks to_disable')
         # sleep to allow compactions to start
         time.sleep(2)
-        self.assertTrue(len(node.grep_log('Compacting.+to_disable', filename=log_file)) > 0, 'Found no log items for {0}'.format(self.strategy))
+        assert len(node.grep_log('Compacting.+to_disable', filename=log_file)) > 0, 'Found no log items for {0}'.format(strategy)
 
     @since('3.7')
-    def user_defined_compaction_test(self):
+    def test_user_defined_compaction(self):
         """
         Test a user defined compaction task by generating a few sstables with cassandra stress
         and autocompaction disabled, and then passing a list of sstable data files directly to nodetool compact.
@@ -499,20 +510,21 @@ class TestCompaction(Tester):
         node1.nodetool('flush keyspace1 standard1')
 
         sstable_files = ' '.join(node1.get_sstable_data_files('keyspace1', 'standard1'))
-        debug('Compacting {}'.format(sstable_files))
+        logger.debug('Compacting {}'.format(sstable_files))
         node1.nodetool('compact --user-defined {}'.format(sstable_files))
 
         sstable_files = node1.get_sstable_data_files('keyspace1', 'standard1')
-        self.assertEquals(len(node1.data_directories()), len(sstable_files),
-                          'Expected one sstable data file per node directory but got {}'.format(sstable_files))
+        assert len(node1.data_directories()) == len(sstable_files), \
+            'Expected one sstable data file per node directory but got {}'.format(sstable_files)
 
+    @pytest.mark.parametrize("strategy", ['LeveledCompactionStrategy'])
     @since('3.10')
-    def fanout_size_test(self):
+    def test_fanout_size(self, strategy):
         """
         @jira_ticket CASSANDRA-11550
         """
-        if not hasattr(self, 'strategy') or self.strategy != 'LeveledCompactionStrategy':
-            self.skipTest('Not implemented unless LeveledCompactionStrategy is used')
+        if not hasattr(self, 'strategy') or strategy != 'LeveledCompactionStrategy':
+            pytest.skip('Not implemented unless LeveledCompactionStrategy is used')
 
         cluster = self.cluster
         cluster.populate(1).start(wait_for_binary_proto=True)
@@ -522,7 +534,7 @@ class TestCompaction(Tester):
         node1.nodetool('disableautocompaction')
 
         session = self.patient_cql_connection(node1)
-        debug("Altering compaction strategy to LCS")
+        logger.debug("Altering compaction strategy to LCS")
         session.execute("ALTER TABLE keyspace1.standard1 with compaction={'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb':1, 'fanout_size':10};")
 
         stress_write(node1, keycount=1000000)
@@ -538,9 +550,9 @@ class TestCompaction(Tester):
         # [0, ?/10, ?, 0, 0, 0...]
         p = re.compile(r'0,\s(\d+)/10,.*')
         m = p.search(output)
-        self.assertEqual(10 * len(node1.data_directories()), int(m.group(1)))
+        assert 10 * len(node1.data_directories()) == int(m.group(1))
 
-        debug("Altering the fanout_size")
+        logger.debug("Altering the fanout_size")
         session.execute("ALTER TABLE keyspace1.standard1 with compaction={'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb':1, 'fanout_size':5};")
 
         # trigger the compaction
@@ -551,12 +563,12 @@ class TestCompaction(Tester):
         # [0, ?/5, ?/25, ?, 0, 0...]
         p = re.compile(r'0,\s(\d+)/5,\s(\d+)/25,.*')
         m = p.search(output)
-        self.assertEqual(5 * len(node1.data_directories()), int(m.group(1)))
-        self.assertEqual(25 * len(node1.data_directories()), int(m.group(2)))
+        assert 5 * len(node1.data_directories()) == int(m.group(1))
+        assert 25 * len(node1.data_directories()) == int(m.group(2))
 
-    def skip_if_no_major_compaction(self):
-        if self.cluster.version() < '2.2' and self.strategy == 'LeveledCompactionStrategy':
-            self.skipTest('major compaction not implemented for LCS in this version of Cassandra')
+    def skip_if_no_major_compaction(self, strategy):
+        if self.cluster.version() < '2.2' and strategy == 'LeveledCompactionStrategy':
+            pytest.skip('major compaction not implemented for LCS in this version of Cassandra')
 
 
 def grep_sstables_in_each_level(node, table_name):
@@ -567,14 +579,8 @@ def grep_sstables_in_each_level(node, table_name):
 
 
 def get_random_word(wordLen, population=string.ascii_letters + string.digits):
-    return ''.join([random.choice(population) for _ in range(wordLen)])
+    return ''.join([random.choice(population) for _ in range(int(wordLen))])
 
 
 def stress_write(node, keycount=100000):
     node.stress(['write', 'n={keycount}'.format(keycount=keycount)])
-
-
-strategies = ['LeveledCompactionStrategy', 'SizeTieredCompactionStrategy', 'DateTieredCompactionStrategy']
-for strategy in strategies:
-    cls_name = ('TestCompaction_with_' + strategy)
-    vars()[cls_name] = type(cls_name, (TestCompaction,), {'strategy': strategy, '__test__': True})
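
The block removed above generated one TestCompaction subclass per strategy at import time; after the migration the same coverage comes from @pytest.mark.parametrize on the individual tests. A minimal, self-contained sketch of that pattern (the class and assertion here are illustrative, not dtest code):

    import pytest

    strategies = ['LeveledCompactionStrategy',
                  'SizeTieredCompactionStrategy',
                  'DateTieredCompactionStrategy']

    class TestCompactionSketch:
        @pytest.mark.parametrize("strategy", strategies)
        def test_strategy_name(self, strategy):
            # pytest collects one item per entry, e.g.
            # test_strategy_name[LeveledCompactionStrategy]
            assert strategy.endswith('CompactionStrategy')

Each parametrized item gets its own test id, so per-strategy results stay individually reportable, much as the generated subclasses were.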

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/compression_test.py
----------------------------------------------------------------------
diff --git a/compression_test.py b/compression_test.py
index c8362c9..d865ba2 100644
--- a/compression_test.py
+++ b/compression_test.py
@@ -1,9 +1,13 @@
 import os
+import pytest
+import logging
 
 from dtest import create_ks
 from scrub_test import TestHelper
 from tools.assertions import assert_crc_check_chance_equal
-from tools.decorators import since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 class TestCompression(TestHelper):
@@ -16,10 +20,10 @@ class TestCompression(TestHelper):
 
         with open(file, 'rb') as fh:
             file_start = fh.read(2)
-            return types.get(file_start.encode('hex'), 'UNKNOWN')
+            return types.get(file_start.hex(), 'UNKNOWN')
 
     @since("3.0")
-    def disable_compression_cql_test(self):
+    def test_disable_compression_cql(self):
         """
         @jira_ticket CASSANDRA-8384
         using new cql create table syntax to disable compression
@@ -33,7 +37,7 @@ class TestCompression(TestHelper):
         session.execute("create table disabled_compression_table (id uuid PRIMARY KEY ) WITH compression = {'enabled': false};")
         session.cluster.refresh_schema_metadata()
         meta = session.cluster.metadata.keyspaces['ks'].tables['disabled_compression_table']
-        self.assertEqual('false', meta.options['compression']['enabled'])
+        assert 'false' == meta.options['compression']['enabled']
 
         for n in range(0, 100):
             session.execute("insert into disabled_compression_table (id) values (uuid());")
@@ -44,12 +48,12 @@ class TestCompression(TestHelper):
         for sstable_path in sstable_paths:
             sstable = os.path.join(sstable_path, sstables['disabled_compression_table'][1])
             if os.path.exists(sstable):
-                self.assertEqual('NONE', self._get_compression_type(sstable))
+                assert 'NONE' == self._get_compression_type(sstable)
                 found = True
-        self.assertTrue(found)
+        assert found
 
     @since("3.0")
-    def compression_cql_options_test(self):
+    def test_compression_cql_options(self):
         """
         @jira_ticket CASSANDRA-8384
         using new cql create table syntax to configure compression
@@ -72,12 +76,12 @@ class TestCompression(TestHelper):
 
         session.cluster.refresh_schema_metadata()
         meta = session.cluster.metadata.keyspaces['ks'].tables['compression_opts_table']
-        self.assertEqual('org.apache.cassandra.io.compress.DeflateCompressor', meta.options['compression']['class'])
-        self.assertEqual('256', meta.options['compression']['chunk_length_in_kb'])
+        assert 'org.apache.cassandra.io.compress.DeflateCompressor' == meta.options['compression']['class']
+        assert '256' == meta.options['compression']['chunk_length_in_kb']
         assert_crc_check_chance_equal(session, "compression_opts_table", 0.25)
 
         warn = node.grep_log("The option crc_check_chance was deprecated as a compression option.")
-        self.assertEqual(len(warn), 0)
+        assert len(warn) == 0
         session.execute("""
             alter table compression_opts_table
                 WITH compression = {
@@ -87,13 +91,13 @@ class TestCompression(TestHelper):
                 }
             """)
         warn = node.grep_log("The option crc_check_chance was deprecated as a compression option.")
-        self.assertEqual(len(warn), 1)
+        assert len(warn) == 1
 
         # check metadata again after crc_check_chance_update
         session.cluster.refresh_schema_metadata()
         meta = session.cluster.metadata.keyspaces['ks'].tables['compression_opts_table']
-        self.assertEqual('org.apache.cassandra.io.compress.DeflateCompressor', meta.options['compression']['class'])
-        self.assertEqual('256', meta.options['compression']['chunk_length_in_kb'])
+        assert 'org.apache.cassandra.io.compress.DeflateCompressor' == meta.options['compression']['class']
+        assert '256' == meta.options['compression']['chunk_length_in_kb']
         assert_crc_check_chance_equal(session, "compression_opts_table", 0.6)
 
         for n in range(0, 100):
@@ -105,12 +109,12 @@ class TestCompression(TestHelper):
         for sstable_path in sstable_paths:
             sstable = os.path.join(sstable_path, sstables['compression_opts_table'][1])
             if os.path.exists(sstable):
-                self.assertEqual('DEFLATE', self._get_compression_type(sstable))
+                assert 'DEFLATE' == self._get_compression_type(sstable)
                 found = True
-        self.assertTrue(found)
+        assert found
 
     @since("3.0")
-    def compression_cql_disabled_with_alter_test(self):
+    def test_compression_cql_disabled_with_alter(self):
         """
         @jira_ticket CASSANDRA-8384
         starting with compression enabled then disabling it
@@ -131,17 +135,17 @@ class TestCompression(TestHelper):
                 AND crc_check_chance = 0.25;
             """)
         meta = session.cluster.metadata.keyspaces['ks'].tables['start_enabled_compression_table']
-        self.assertEqual('org.apache.cassandra.io.compress.SnappyCompressor', meta.options['compression']['class'])
-        self.assertEqual('256', meta.options['compression']['chunk_length_in_kb'])
+        assert 'org.apache.cassandra.io.compress.SnappyCompressor' == meta.options['compression']['class']
+        assert '256' == meta.options['compression']['chunk_length_in_kb']
         assert_crc_check_chance_equal(session, "start_enabled_compression_table", 0.25)
         session.execute("alter table start_enabled_compression_table with compression = {'enabled': false};")
 
         session.cluster.refresh_schema_metadata()
         meta = session.cluster.metadata.keyspaces['ks'].tables['start_enabled_compression_table']
-        self.assertEqual('false', meta.options['compression']['enabled'])
+        assert 'false' == meta.options['compression']['enabled']
 
     @since("3.0")
-    def compression_cql_enabled_with_alter_test(self):
+    def test_compression_cql_enabled_with_alter(self):
         """
         @jira_ticket CASSANDRA-8384
         starting with compression disabled and enabling it
@@ -154,7 +158,7 @@ class TestCompression(TestHelper):
         create_ks(session, 'ks', 1)
         session.execute("create table start_disabled_compression_table (id uuid PRIMARY KEY ) WITH compression = {'enabled': false};")
         meta = session.cluster.metadata.keyspaces['ks'].tables['start_disabled_compression_table']
-        self.assertEqual('false', meta.options['compression']['enabled'])
+        assert 'false' == meta.options['compression']['enabled']
         session.execute("""alter table start_disabled_compression_table
                                 WITH compression = {
                                         'class': 'SnappyCompressor',
@@ -163,6 +167,6 @@ class TestCompression(TestHelper):
 
         session.cluster.refresh_schema_metadata()
         meta = session.cluster.metadata.keyspaces['ks'].tables['start_disabled_compression_table']
-        self.assertEqual('org.apache.cassandra.io.compress.SnappyCompressor', meta.options['compression']['class'])
-        self.assertEqual('256', meta.options['compression']['chunk_length_in_kb'])
+        assert 'org.apache.cassandra.io.compress.SnappyCompressor' == meta.options['compression']['class']
+        assert '256' == meta.options['compression']['chunk_length_in_kb']
         assert_crc_check_chance_equal(session, "start_disabled_compression_table", 0.25)
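
The _get_compression_type change above swaps Python 2's str.encode('hex') for bytes.hex(), since files opened with 'rb' yield bytes in Python 3. A quick sketch of the equivalence (the gzip magic bytes are just an example value):

    # First two bytes of a gzip stream; any bytes value behaves the same way.
    file_start = b'\x1f\x8b'
    assert file_start.hex() == '1f8b'      # Python 3 spelling
    # The Python 2 spelling was: file_start.encode('hex')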

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/concurrent_schema_changes_test.py
----------------------------------------------------------------------
diff --git a/concurrent_schema_changes_test.py b/concurrent_schema_changes_test.py
index 49041f9..d0af49c 100644
--- a/concurrent_schema_changes_test.py
+++ b/concurrent_schema_changes_test.py
@@ -3,15 +3,19 @@ import os
 import pprint
 import re
 import time
+import pytest
+import logging
+
 from random import randrange
 from threading import Thread
-from unittest import skip
 
 from cassandra.concurrent import execute_concurrent
 from ccmlib.node import Node
 
-from dtest import Tester, debug, create_ks
-from tools.decorators import since
+from dtest import Tester, create_ks
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 def wait(delay=2):
@@ -21,7 +25,7 @@ def wait(delay=2):
     time.sleep(delay)
 
 
-@skip('awaiting CASSANDRA-10699')
+@pytest.mark.skip(reason='awaiting CASSANDRA-10699')
 class TestConcurrentSchemaChanges(Tester):
     allow_log_errors = True
 
@@ -29,7 +33,7 @@ class TestConcurrentSchemaChanges(Tester):
         """
         prepares for schema changes by creating a keyspace and column family.
         """
-        debug("prepare_for_changes() " + str(namespace))
+        logger.debug("prepare_for_changes() " + str(namespace))
         # create a keyspace that will be used
         create_ks(session, "ks_%s" % namespace, 2)
         session.execute('USE ks_%s' % namespace)
@@ -77,7 +81,7 @@ class TestConcurrentSchemaChanges(Tester):
         rebuild index (via jmx)
         set default_validation_class
         """
-        debug("make_schema_changes() " + str(namespace))
+        logger.debug("make_schema_changes() " + str(namespace))
         session.execute('USE ks_%s' % namespace)
         # drop keyspace
         session.execute('DROP KEYSPACE ks2_%s' % namespace)
@@ -117,14 +121,14 @@ class TestConcurrentSchemaChanges(Tester):
 
     def validate_schema_consistent(self, node):
         """ Makes sure that there is only one schema """
-        debug("validate_schema_consistent() " + node.name)
+        logger.debug("validate_schema_consistent() " + node.name)
 
         response = node.nodetool('describecluster').stdout
         schemas = response.split('Schema versions:')[1].strip()
         num_schemas = len(re.findall('\[.*?\]', schemas))
-        self.assertEqual(num_schemas, 1, "There were multiple schema versions: {}".format(pprint.pformat(schemas)))
+        assert num_schemas == 1, "There were multiple schema versions: {}".format(pprint.pformat(schemas))
 
-    def create_lots_of_tables_concurrently_test(self):
+    def test_create_lots_of_tables_concurrently(self):
         """
         create tables across multiple threads concurrently
         """
@@ -141,18 +145,18 @@ class TestConcurrentSchemaChanges(Tester):
         results = execute_concurrent(session, cmds, raise_on_first_error=True, concurrency=200)
 
         for (success, result) in results:
-            self.assertTrue(success, "didn't get success on table create: {}".format(result))
+            assert success, "didn't get success on table create: {}".format(result)
 
         wait(10)
 
         session.cluster.refresh_schema_metadata()
         table_meta = session.cluster.metadata.keyspaces["lots_o_tables"].tables
-        self.assertEqual(250, len(table_meta))
+        assert 250 == len(table_meta)
         self.validate_schema_consistent(node1)
         self.validate_schema_consistent(node2)
         self.validate_schema_consistent(node3)
 
-    def create_lots_of_alters_concurrently_test(self):
+    def test_create_lots_of_alters_concurrently(self):
         """
         create alters across multiple threads concurrently
         """
@@ -169,26 +173,26 @@ class TestConcurrentSchemaChanges(Tester):
 
         cmds = [("alter table base_{0} add c_{1} int".format(randrange(0, 10), n), ()) for n in range(500)]
 
-        debug("executing 500 alters")
+        logger.debug("executing 500 alters")
         results = execute_concurrent(session, cmds, raise_on_first_error=True, concurrency=150)
 
         for (success, result) in results:
-            self.assertTrue(success, "didn't get success on table create: {}".format(result))
+            assert success, "didn't get success on table create: {}".format(result)
 
-        debug("waiting for alters to propagate")
+        logger.debug("waiting for alters to propagate")
         wait(30)
 
         session.cluster.refresh_schema_metadata()
         table_meta = session.cluster.metadata.keyspaces["lots_o_alters"].tables
-        column_ct = sum([len(table.columns) for table in table_meta.values()])
+        column_ct = sum([len(table.columns) for table in list(table_meta.values())])
 
         # primary key + alters
-        self.assertEqual(510, column_ct)
+        assert 510 == column_ct
         self.validate_schema_consistent(node1)
         self.validate_schema_consistent(node2)
         self.validate_schema_consistent(node3)
 
-    def create_lots_of_indexes_concurrently_test(self):
+    def test_create_lots_of_indexes_concurrently(self):
         """
         create indexes across multiple threads concurrently
         """
@@ -205,7 +209,7 @@ class TestConcurrentSchemaChanges(Tester):
                 session.execute("insert into base_{0} (id, c1, c2) values (uuid(), {1}, {2})".format(n, ins, ins))
         wait(5)
 
-        debug("creating indexes")
+        logger.debug("creating indexes")
         cmds = []
         for n in range(5):
             cmds.append(("create index ix_base_{0}_c1 on base_{0} (c1)".format(n), ()))
@@ -214,31 +218,31 @@ class TestConcurrentSchemaChanges(Tester):
         results = execute_concurrent(session, cmds, raise_on_first_error=True)
 
         for (success, result) in results:
-            self.assertTrue(success, "didn't get success on table create: {}".format(result))
+            assert success, "didn't get success on table create: {}".format(result)
 
         wait(5)
 
-        debug("validating schema and index list")
+        logger.debug("validating schema and index list")
         session.cluster.control_connection.wait_for_schema_agreement()
         session.cluster.refresh_schema_metadata()
         index_meta = session.cluster.metadata.keyspaces["lots_o_indexes"].indexes
         self.validate_schema_consistent(node1)
         self.validate_schema_consistent(node2)
-        self.assertEqual(10, len(index_meta))
+        assert 10 == len(index_meta)
         for n in range(5):
-            self.assertIn("ix_base_{0}_c1".format(n), index_meta)
-            self.assertIn("ix_base_{0}_c2".format(n), index_meta)
+            assert "ix_base_{0}_c1".format(n) in index_meta
+            assert "ix_base_{0}_c2".format(n) in index_meta
 
-        debug("waiting for indexes to fill in")
+        logger.debug("waiting for indexes to fill in")
         wait(45)
-        debug("querying all values by secondary index")
+        logger.debug("querying all values by secondary index")
         for n in range(5):
             for ins in range(1000):
-                self.assertEqual(1, len(list(session.execute("select * from base_{0} where c1 = {1}".format(n, ins)))))
-                self.assertEqual(1, len(list(session.execute("select * from base_{0} where c2 = {1}".format(n, ins)))))
+                assert 1 == len(list(session.execute("select * from base_{0} where c1 = {1}".format(n, ins))))
+                assert 1 == len(list(session.execute("select * from base_{0} where c2 = {1}".format(n, ins))))
 
     @since('3.0')
-    def create_lots_of_mv_concurrently_test(self):
+    def test_create_lots_of_mv_concurrently(self):
         """
         create materialized views across multiple threads concurrently
         """
@@ -261,15 +265,15 @@ class TestConcurrentSchemaChanges(Tester):
                              "WHERE c{0} IS NOT NULL AND id IS NOT NULL PRIMARY KEY (c{0}, id)".format(n)))
             session.cluster.control_connection.wait_for_schema_agreement()
 
-        debug("waiting for indexes to fill in")
+        logger.debug("waiting for indexes to fill in")
         wait(60)
         result = list(session.execute(("SELECT * FROM system_schema.views "
                                        "WHERE keyspace_name='lots_o_views' AND base_table_name='source_data' ALLOW FILTERING")))
-        self.assertEqual(10, len(result), "missing some mv from source_data table")
+        assert 10 == len(result), "missing some mv from source_data table"
 
         for n in range(1, 11):
             result = list(session.execute("select * from src_by_c{0}".format(n)))
-            self.assertEqual(4000, len(result))
+            assert 4000 == len(result)
 
     def _do_lots_of_schema_actions(self, session):
         for n in range(20):
@@ -287,7 +291,7 @@ class TestConcurrentSchemaChanges(Tester):
 
         results = execute_concurrent(session, cmds, concurrency=100, raise_on_first_error=True)
         for (success, result) in results:
-            self.assertTrue(success, "didn't get success: {}".format(result))
+            assert success, "didn't get success: {}".format(result)
 
     def _verify_lots_of_schema_actions(self, session):
         session.cluster.control_connection.wait_for_schema_agreement()
@@ -302,7 +306,7 @@ class TestConcurrentSchemaChanges(Tester):
         table_meta = session.cluster.metadata.keyspaces["lots_o_churn"].tables
         errors = []
         for n in range(20):
-            self.assertTrue("new_table_{0}".format(n) in table_meta)
+            assert "new_table_{0}".format(n) in table_meta
 
             if 7 != len(table_meta["index_me_{0}".format(n)].indexes):
                 errors.append("index_me_{0} expected indexes ix_index_me_c0->7, got: {1}".format(n, sorted(list(table_meta["index_me_{0}".format(n)].indexes))))
@@ -313,9 +317,9 @@ class TestConcurrentSchemaChanges(Tester):
             if 8 != len(altered.columns):
                 errors.append("alter_me_{0} expected c1 -> c7, id, got: {1}".format(n, sorted(list(altered.columns))))
 
-        self.assertTrue(0 == len(errors), "\n".join(errors))
+        assert 0 == len(errors), "\n".join(errors)
 
-    def create_lots_of_schema_churn_test(self):
+    def test_create_lots_of_schema_churn(self):
         """
         create tables, indexes, alters across multiple threads concurrently
         """
@@ -327,11 +331,11 @@ class TestConcurrentSchemaChanges(Tester):
         session.execute("use lots_o_churn")
 
         self._do_lots_of_schema_actions(session)
-        debug("waiting for things to settle and sync")
+        logger.debug("waiting for things to settle and sync")
         wait(60)
         self._verify_lots_of_schema_actions(session)
 
-    def create_lots_of_schema_churn_with_node_down_test(self):
+    def test_create_lots_of_schema_churn_with_node_down(self):
         """
         create tables, indexes, alters across multiple threads concurrently with a node down
         """
@@ -346,15 +350,15 @@ class TestConcurrentSchemaChanges(Tester):
         self._do_lots_of_schema_actions(session)
         wait(15)
         node2.start(wait_other_notice=True)
-        debug("waiting for things to settle and sync")
+        logger.debug("waiting for things to settle and sync")
         wait(120)
         self._verify_lots_of_schema_actions(session)
 
-    def basic_test(self):
+    def test_basic(self):
         """
         make several schema changes on the same node.
         """
-        debug("basic_test()")
+        logger.debug("basic_test()")
 
         cluster = self.cluster
         cluster.populate(2).start()
@@ -366,8 +370,8 @@ class TestConcurrentSchemaChanges(Tester):
 
         self.make_schema_changes(session, namespace='ns1')
 
-    def changes_to_different_nodes_test(self):
-        debug("changes_to_different_nodes_test()")
+    def test_changes_to_different_nodes(self):
+        logger.debug("changes_to_different_nodes_test()")
         cluster = self.cluster
         cluster.populate(2).start()
         node1, node2 = cluster.nodelist()
@@ -389,13 +393,13 @@ class TestConcurrentSchemaChanges(Tester):
         # check both, just because we can
         self.validate_schema_consistent(node2)
 
-    def changes_while_node_down_test(self):
+    def test_changes_while_node_down(self):
         """
         makes schema changes while a node is down.
         Make schema changes to node 1 while node 2 is down.
         Then bring up 2 and make sure it gets the changes.
         """
-        debug("changes_while_node_down_test()")
+        logger.debug("changes_while_node_down_test()")
         cluster = self.cluster
         cluster.populate(2).start()
         node1, node2 = cluster.nodelist()
@@ -414,7 +418,7 @@ class TestConcurrentSchemaChanges(Tester):
         wait(20)
         self.validate_schema_consistent(node1)
 
-    def changes_while_node_toggle_test(self):
+    def test_changes_while_node_toggle(self):
         """
         makes schema changes while a node is down.
 
@@ -422,7 +426,7 @@ class TestConcurrentSchemaChanges(Tester):
         Bring down 2, bring up 1, and finally bring up 2.
         1 should get the changes.
         """
-        debug("changes_while_node_toggle_test()")
+        logger.debug("changes_while_node_toggle_test()")
         cluster = self.cluster
         cluster.populate(2).start()
         node1, node2 = cluster.nodelist()
@@ -441,8 +445,8 @@ class TestConcurrentSchemaChanges(Tester):
         wait(20)
         self.validate_schema_consistent(node1)
 
-    def decommission_node_test(self):
-        debug("decommission_node_test()")
+    def test_decommission_node(self):
+        logger.debug("decommission_node_test()")
         cluster = self.cluster
 
         cluster.populate(1)
@@ -490,8 +494,8 @@ class TestConcurrentSchemaChanges(Tester):
         wait(30)
         self.validate_schema_consistent(node1)
 
-    def snapshot_test(self):
-        debug("snapshot_test()")
+    def test_snapshot(self):
+        logger.debug("snapshot_test()")
         cluster = self.cluster
         cluster.populate(2).start()
         node1, node2 = cluster.nodelist()
@@ -535,11 +539,11 @@ class TestConcurrentSchemaChanges(Tester):
         wait(2)
         self.validate_schema_consistent(node1)
 
-    def load_test(self):
+    def test_load(self):
         """
         apply schema changes while the cluster is under load.
         """
-        debug("load_test()")
+        logger.debug("load_test()")
 
         cluster = self.cluster
         cluster.populate(1).start()
@@ -548,14 +552,14 @@ class TestConcurrentSchemaChanges(Tester):
         session = self.cql_connection(node1)
 
         def stress(args=[]):
-            debug("Stressing")
+            logger.debug("Stressing")
             node1.stress(args)
-            debug("Done Stressing")
+            logger.debug("Done Stressing")
 
         def compact():
-            debug("Compacting...")
+            logger.debug("Compacting...")
             node1.nodetool('compact')
-            debug("Done Compacting.")
+            logger.debug("Done Compacting.")
 
         # put some data into the cluster
         stress(['write', 'n=30000', 'no-warmup', '-rate', 'threads=8'])
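
The schema-churn tests above fan DDL out through execute_concurrent and assert on the (success, result) pairs it returns. A condensed sketch of that pattern, assuming `session` is an already-connected cassandra-driver Session and the keyspace/table names are illustrative:

    from cassandra.concurrent import execute_concurrent

    cmds = [("create table lots_o_tables.base_{0} (id uuid PRIMARY KEY)".format(n), ())
            for n in range(250)]
    results = execute_concurrent(session, cmds, raise_on_first_error=True,
                                 concurrency=200)
    for (success, result) in results:
        assert success, "didn't get success on table create: {}".format(result)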

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/configuration_test.py
----------------------------------------------------------------------
diff --git a/configuration_test.py b/configuration_test.py
index 2f696eb..6bb5e95 100644
--- a/configuration_test.py
+++ b/configuration_test.py
@@ -1,16 +1,30 @@
 import os
-
+import logging
 import parse
+import pytest
+
 from cassandra.concurrent import execute_concurrent_with_args
 
-from dtest import Tester, debug, create_ks
+from tools.misc import ImmutableMapping
+from dtest_setup_overrides import DTestSetupOverrides
+from dtest import Tester, create_ks
 from tools.jmxutils import (JolokiaAgent, make_mbean,
                             remove_perf_disable_shared_mem)
 
+logger = logging.getLogger(__name__)
+
+
+@pytest.fixture()
+def fixture_dtest_setup_overrides(request):
+    dtest_setup_overrides = DTestSetupOverrides()
+    if request.node.name == "test_change_durable_writes":
+        dtest_setup_overrides.cluster_options = ImmutableMapping({'commitlog_segment_size_in_mb': 1})
+    return dtest_setup_overrides
+
 
 class TestConfiguration(Tester):
 
-    def compression_chunk_length_test(self):
+    def test_compression_chunk_length(self):
         """ Verify the setting of compression chunk_length [#3558]"""
         cluster = self.cluster
 
@@ -20,7 +34,9 @@ class TestConfiguration(Tester):
         create_ks(session, 'ks', 1)
 
         create_table_query = "CREATE TABLE test_table (row varchar, name varchar, value int, PRIMARY KEY (row, name));"
-        alter_chunk_len_query = "ALTER TABLE test_table WITH compression = {{'sstable_compression' : 'SnappyCompressor', 'chunk_length_kb' : {chunk_length}}};"
+        alter_chunk_len_query = "ALTER TABLE test_table WITH " \
+                                "compression = {{'sstable_compression' : 'SnappyCompressor', " \
+                                "'chunk_length_kb' : {chunk_length}}};"
 
         session.execute(create_table_query)
 
@@ -30,7 +46,8 @@ class TestConfiguration(Tester):
         session.execute(alter_chunk_len_query.format(chunk_length=64))
         self._check_chunk_length(session, 64)
 
-    def change_durable_writes_test(self):
+    @pytest.mark.timeout(60*30)
+    def test_change_durable_writes(self):
         """
         @jira_ticket CASSANDRA-9560
 
@@ -51,15 +68,14 @@ class TestConfiguration(Tester):
         """
         def new_commitlog_cluster_node():
             # writes should block on commitlog fsync
-            self.cluster.populate(1)
-            node = self.cluster.nodelist()[0]
-            self.cluster.set_configuration_options(values={'commitlog_segment_size_in_mb': 1})
-            self.cluster.set_batch_commitlog(enabled=True)
+            self.fixture_dtest_setup.cluster.populate(1)
+            node = self.fixture_dtest_setup.cluster.nodelist()[0]
+            self.fixture_dtest_setup.cluster.set_batch_commitlog(enabled=True)
 
             # disable JVM option so we can use Jolokia
-            # this has to happen after .set_configuration_options because of implmentation details
+            # this has to happen after .set_configuration_options because of implementation details
             remove_perf_disable_shared_mem(node)
-            self.cluster.start(wait_for_binary_proto=True)
+            self.fixture_dtest_setup.cluster.start(wait_for_binary_proto=True)
             return node
 
         durable_node = new_commitlog_cluster_node()
@@ -70,16 +86,15 @@ class TestConfiguration(Tester):
         durable_session.execute("CREATE KEYSPACE ks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1} "
                                 "AND DURABLE_WRITES = true")
         durable_session.execute('CREATE TABLE ks.tab (key int PRIMARY KEY, a int, b int, c int)')
-        debug('commitlog size diff = ' + str(commitlog_size(durable_node) - durable_init_size))
+        logger.debug('commitlog size diff = ' + str(commitlog_size(durable_node) - durable_init_size))
         write_to_trigger_fsync(durable_session, 'ks', 'tab')
 
-        self.assertGreater(commitlog_size(durable_node), durable_init_size,
-                           msg='This test will not work in this environment; '
-                               'write_to_trigger_fsync does not trigger fsync.')
+        assert commitlog_size(durable_node) > durable_init_size, \
+            "This test will not work in this environment; write_to_trigger_fsync does not trigger fsync."
 
         # get a fresh cluster to work on
-        self.tearDown()
-        self.setUp()
+        durable_session.shutdown()
+        self.fixture_dtest_setup.cleanup_and_replace_cluster()
 
         node = new_commitlog_cluster_node()
         init_size = commitlog_size(node)
@@ -91,8 +106,7 @@ class TestConfiguration(Tester):
         session.execute('CREATE TABLE ks.tab (key int PRIMARY KEY, a int, b int, c int)')
         session.execute('ALTER KEYSPACE ks WITH DURABLE_WRITES=true')
         write_to_trigger_fsync(session, 'ks', 'tab')
-        self.assertGreater(commitlog_size(node), init_size,
-                           msg='ALTER KEYSPACE was not respected')
+        assert commitlog_size(node) > init_size, "ALTER KEYSPACE was not respected"
 
     def overlapping_data_folders(self):
         """
@@ -130,12 +144,13 @@ class TestConfiguration(Tester):
             if 'compression' in result:
                 params = result
 
-        self.assertNotEqual(params, '', "Looking for the string 'sstable_compression', but could not find it in {str}".format(str=result))
+        assert params != '', "Looking for the string 'sstable_compression', but could not find " \
+                             "it in {str}".format(str=result)
 
         chunk_string = "chunk_length_kb" if self.cluster.version() < '3.0' else "chunk_length_in_kb"
         chunk_length = parse.search("'" + chunk_string + "': '{chunk_length:d}'", result).named['chunk_length']
 
-        self.assertEqual(chunk_length, value, "Expected chunk_length: {}.  We got: {}".format(value, chunk_length))
+        assert chunk_length == value, "Expected chunk_length: {}.  We got: {}".format(value, chunk_length)
 
 
 def write_to_trigger_fsync(session, ks, table):
@@ -145,9 +160,17 @@ def write_to_trigger_fsync(session, ks, table):
     commitlog_segment_size_in_mb is 1. Assumes the table's columns are
     (key int, a int, b int, c int).
     """
+    """
+    From https://github.com/datastax/python-driver/pull/877/files
+      "Note: in the case that `generators` are used, it is important to ensure the consumers do not
+       block or attempt further synchronous requests, because no further IO will be processed until
+       the consumer returns. This may also produce a deadlock in the IO event thread."
+    """
     execute_concurrent_with_args(session,
-                                 session.prepare('INSERT INTO "{ks}"."{table}" (key, a, b, c) VALUES (?, ?, ?, ?)'.format(ks=ks, table=table)),
-                                 ((x, x + 1, x + 2, x + 3) for x in range(50000)))
+                                 session.prepare('INSERT INTO "{ks}"."{table}" (key, a, b, c) '
+                                                 'VALUES (?, ?, ?, ?)'.format(ks=ks, table=table)),
+                                 ((x, x + 1, x + 2, x + 3)
+                                 for x in range(50000)), concurrency=5)
 
 
 def commitlog_size(node):
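
Per the driver note now quoted in write_to_trigger_fsync, a generator argument source means no further IO is processed while a consumer blocks, so the call caps concurrency. A sketch of the call shape, assuming `session` is a connected cassandra-driver Session and ks.tab already exists:

    from cassandra.concurrent import execute_concurrent_with_args

    insert = session.prepare(
        'INSERT INTO "ks"."tab" (key, a, b, c) VALUES (?, ?, ?, ?)')
    args = ((x, x + 1, x + 2, x + 3) for x in range(50000))  # generator source
    # Bounded concurrency keeps the IO event thread from stalling while
    # the generator is consumed.
    execute_concurrent_with_args(session, insert, args, concurrency=5)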


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@cassandra.apache.org
For additional commands, e-mail: commits-help@cassandra.apache.org


[16/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/thrift_bindings/thrift010/Cassandra.py
----------------------------------------------------------------------
diff --git a/thrift_bindings/thrift010/Cassandra.py b/thrift_bindings/thrift010/Cassandra.py
new file mode 100644
index 0000000..06fd579
--- /dev/null
+++ b/thrift_bindings/thrift010/Cassandra.py
@@ -0,0 +1,10961 @@
+#
+# Autogenerated by Thrift Compiler (0.10.0)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+#  options string: py
+#
+
+from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
+from thrift.protocol.TProtocol import TProtocolException
+import sys
+import logging
+from .ttypes import *
+from thrift.Thrift import TProcessor
+from thrift.transport import TTransport
+
+
+class Iface(object):
+    def login(self, auth_request):
+        """
+        Parameters:
+         - auth_request
+        """
+        pass
+
+    def set_keyspace(self, keyspace):
+        """
+        Parameters:
+         - keyspace
+        """
+        pass
+
+    def get(self, key, column_path, consistency_level):
+        """
+        Get the Column or SuperColumn at the given column_path. If no value is present, NotFoundException is thrown. (This is
+        the only method that can throw an exception under non-failure conditions.)
+
+        Parameters:
+         - key
+         - column_path
+         - consistency_level
+        """
+        pass
+
+    def get_slice(self, key, column_parent, predicate, consistency_level):
+        """
+        Get the group of columns contained by column_parent (either a ColumnFamily name or a ColumnFamily/SuperColumn name
+        pair) specified by the given SlicePredicate. If no matching values are found, an empty list is returned.
+
+        Parameters:
+         - key
+         - column_parent
+         - predicate
+         - consistency_level
+        """
+        pass
+
+    def get_count(self, key, column_parent, predicate, consistency_level):
+        """
+        returns the number of columns matching <code>predicate</code> for a particular <code>key</code>,
+        <code>ColumnFamily</code> and optionally <code>SuperColumn</code>.
+
+        Parameters:
+         - key
+         - column_parent
+         - predicate
+         - consistency_level
+        """
+        pass
+
+    def multiget_slice(self, keys, column_parent, predicate, consistency_level):
+        """
+        Performs a get_slice for column_parent and predicate for the given keys in parallel.
+
+        Parameters:
+         - keys
+         - column_parent
+         - predicate
+         - consistency_level
+        """
+        pass
+
+    def multiget_count(self, keys, column_parent, predicate, consistency_level):
+        """
+        Perform a get_count in parallel on the given list<binary> keys. The return value maps keys to the count found.
+
+        Parameters:
+         - keys
+         - column_parent
+         - predicate
+         - consistency_level
+        """
+        pass
+
+    def get_range_slices(self, column_parent, predicate, range, consistency_level):
+        """
+        returns a subset of columns for a contiguous range of keys.
+
+        Parameters:
+         - column_parent
+         - predicate
+         - range
+         - consistency_level
+        """
+        pass
+
+    def get_paged_slice(self, column_family, range, start_column, consistency_level):
+        """
+        returns a range of columns, wrapping to the next rows if necessary to collect max_results.
+
+        Parameters:
+         - column_family
+         - range
+         - start_column
+         - consistency_level
+        """
+        pass
+
+    def get_indexed_slices(self, column_parent, index_clause, column_predicate, consistency_level):
+        """
+        Returns the subset of columns specified in SlicePredicate for the rows matching the IndexClause
+        @deprecated use get_range_slices instead with range.row_filter specified
+
+        Parameters:
+         - column_parent
+         - index_clause
+         - column_predicate
+         - consistency_level
+        """
+        pass
+
+    def insert(self, key, column_parent, column, consistency_level):
+        """
+        Insert a Column at the given column_parent.column_family and optional column_parent.super_column.
+
+        Parameters:
+         - key
+         - column_parent
+         - column
+         - consistency_level
+        """
+        pass
+
+    def add(self, key, column_parent, column, consistency_level):
+        """
+        Increment or decrement a counter.
+
+        Parameters:
+         - key
+         - column_parent
+         - column
+         - consistency_level
+        """
+        pass
+
+    def cas(self, key, column_family, expected, updates, serial_consistency_level, commit_consistency_level):
+        """
+        Atomic compare and set.
+
+        If the cas is successful, the success boolean in CASResult will be true and there will be no current_values.
+        Otherwise, success will be false and current_values will contain the current values for the columns in
+        expected (that, by definition of compare-and-set, will differ from the values in expected).
+
+        A cas operation takes 2 consistency level. The first one, serial_consistency_level, simply indicates the
+        level of serialization required. This can be either ConsistencyLevel.SERIAL or ConsistencyLevel.LOCAL_SERIAL.
+        The second one, commit_consistency_level, defines the consistency level for the commit phase of the cas. This
+        is a more traditional consistency level (the same CL than for traditional writes are accepted) that impact
+        the visibility for reads of the operation. For instance, if commit_consistency_level is QUORUM, then it is
+        guaranteed that a followup QUORUM read will see the cas write (if that one was successful obviously). If
+        commit_consistency_level is ANY, you will need to use a SERIAL/LOCAL_SERIAL read to be guaranteed to see
+        the write.
+
+        Parameters:
+         - key
+         - column_family
+         - expected
+         - updates
+         - serial_consistency_level
+         - commit_consistency_level
+        """
+        pass
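+
+    # Illustrative sketch (names and values are examples only): expected and
+    # updates are lists of Column, and a failed cas reports what it actually
+    # found.
+    #
+    #   ts = int(time.time() * 1e6)
+    #   res = client.cas(b'key1', 'cf1',
+    #                    [Column(name=b'col', value=b'old', timestamp=ts)],
+    #                    [Column(name=b'col', value=b'new', timestamp=ts)],
+    #                    ConsistencyLevel.SERIAL, ConsistencyLevel.QUORUM)
+    #   if not res.success:
+    #       current = res.current_values  # the columns actually present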
+
+    def remove(self, key, column_path, timestamp, consistency_level):
+        """
+        Remove data from the row specified by key at the granularity specified by column_path, and the given timestamp. Note
+        that all the values in column_path besides column_path.column_family are truly optional: you can remove the entire
+        row by just specifying the ColumnFamily, or you can remove a SuperColumn or a single Column by specifying those levels too.
+
+        Parameters:
+         - key
+         - column_path
+         - timestamp
+         - consistency_level
+        """
+        pass
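+
+    # Illustrative sketch: the granularity follows from how much of the
+    # ColumnPath is filled in, e.g. deleting a single column:
+    #
+    #   path = ColumnPath(column_family='cf1', column=b'name')
+    #   client.remove(b'key1', path, int(time.time() * 1e6),
+    #                 ConsistencyLevel.QUORUM)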
+
+    def remove_counter(self, key, path, consistency_level):
+        """
+        Remove a counter at the specified location.
+        Note that counters have limited support for deletes: if you remove a counter, you must wait to issue any following update
+        until the delete has reached all the nodes and all of them have been fully compacted.
+
+        Parameters:
+         - key
+         - path
+         - consistency_level
+        """
+        pass
+
+    def batch_mutate(self, mutation_map, consistency_level):
+        """
+        Mutate many columns or super columns for many row keys. See also: Mutation.
+
+        mutation_map maps key to column family to a list of Mutation objects to take place at that scope.
+
+        Parameters:
+         - mutation_map
+         - consistency_level
+        """
+        pass
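+
+    # Illustrative sketch of the mutation_map shape (Mutation and
+    # ColumnOrSuperColumn ttypes assumed from these bindings):
+    #
+    #   mutation = Mutation(column_or_supercolumn=ColumnOrSuperColumn(
+    #       column=Column(name=b'name', value=b'value', timestamp=ts)))
+    #   client.batch_mutate({b'key1': {'cf1': [mutation]}},
+    #                       ConsistencyLevel.QUORUM)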
+
+    def atomic_batch_mutate(self, mutation_map, consistency_level):
+        """
+        Atomically mutate many columns or super columns for many row keys. See also: Mutation.
+
+        mutation_map maps key to column family to a list of Mutation objects to take place at that scope.
+
+        Parameters:
+         - mutation_map
+         - consistency_level
+        """
+        pass
+
+    def truncate(self, cfname):
+        """
+        Truncate will mark an entire column family as deleted.
+        From the user's perspective a successful call to truncate will result in complete data deletion from cfname.
+        Internally, however, disk space will not be immediately released; as with all deletes in Cassandra, this one
+        only marks the data as deleted.
+        The operation succeeds only if all hosts in the cluster are available, and will throw an UnavailableException
+        if some hosts are down.
+
+        Parameters:
+         - cfname
+        """
+        pass
+
+    def get_multi_slice(self, request):
+        """
+        Select multiple slices of a key in a single RPC operation
+
+        Parameters:
+         - request
+        """
+        pass
+
+    def describe_schema_versions(self):
+        """
+        for each schema version present in the cluster, returns a list of nodes at that version.
+        hosts that do not respond will be under the key DatabaseDescriptor.INITIAL_VERSION.
+        the cluster is all on the same version if the size of the map is 1.
+        """
+        pass
+
+    def describe_keyspaces(self):
+        """
+        list the defined keyspaces in this cluster
+        """
+        pass
+
+    def describe_cluster_name(self):
+        """
+        get the cluster name
+        """
+        pass
+
+    def describe_version(self):
+        """
+        get the thrift api version
+        """
+        pass
+
+    def describe_ring(self, keyspace):
+        """
+        get the token ring: a map of ranges to host addresses,
+        represented as a set of TokenRange instead of a map from range
+        to list of endpoints, because you can't use Thrift structs as
+        map keys:
+        https://issues.apache.org/jira/browse/THRIFT-162
+
+        for the same reason, we can't return a set here, even though
+        order is neither important nor predictable.
+
+        Parameters:
+         - keyspace
+        """
+        pass
+
+    def describe_local_ring(self, keyspace):
+        """
+        same as describe_ring, but considers only nodes in the local DC
+
+        Parameters:
+         - keyspace
+        """
+        pass
+
+    def describe_token_map(self):
+        """
+        get the mapping between token->node ip
+        without taking replication into consideration
+        https://issues.apache.org/jira/browse/CASSANDRA-4092
+        """
+        pass
+
+    def describe_partitioner(self):
+        """
+        returns the partitioner used by this cluster
+        """
+        pass
+
+    def describe_snitch(self):
+        """
+        returns the snitch used by this cluster
+        """
+        pass
+
+    def describe_keyspace(self, keyspace):
+        """
+        describe specified keyspace
+
+        Parameters:
+         - keyspace
+        """
+        pass
+
+    def describe_splits(self, cfName, start_token, end_token, keys_per_split):
+        """
+        experimental API for hadoop/parallel query support.
+        may change violently and without warning.
+
+        returns list of token strings such that first subrange is (list[0], list[1]],
+        next is (list[1], list[2]], etc.
+
+        Parameters:
+         - cfName
+         - start_token
+         - end_token
+         - keys_per_split
+        """
+        pass
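+
+    # For example, a return value of ['0', '100', '200'] describes the two
+    # subranges ('0', '100'] and ('100', '200'].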
+
+    def trace_next_query(self):
+        """
+        Enables tracing for the next query in this connection and returns the UUID for that trace session.
+        The next query will be traced independently of the trace probability, and the returned UUID can be used to query the trace keyspace.
+        """
+        pass
+
+    def describe_splits_ex(self, cfName, start_token, end_token, keys_per_split):
+        """
+        Parameters:
+         - cfName
+         - start_token
+         - end_token
+         - keys_per_split
+        """
+        pass
+
+    def system_add_column_family(self, cf_def):
+        """
+        adds a column family. returns the new schema id.
+
+        Parameters:
+         - cf_def
+        """
+        pass
+
+    def system_drop_column_family(self, column_family):
+        """
+        drops a column family. returns the new schema id.
+
+        Parameters:
+         - column_family
+        """
+        pass
+
+    def system_add_keyspace(self, ks_def):
+        """
+        adds a keyspace and any column families that are part of it. returns the new schema id.
+
+        Parameters:
+         - ks_def
+        """
+        pass
+
+    def system_drop_keyspace(self, keyspace):
+        """
+        drops a keyspace and any column families that are part of it. returns the new schema id.
+
+        Parameters:
+         - keyspace
+        """
+        pass
+
+    def system_update_keyspace(self, ks_def):
+        """
+        updates properties of a keyspace. returns the new schema id.
+
+        Parameters:
+         - ks_def
+        """
+        pass
+
+    def system_update_column_family(self, cf_def):
+        """
+        updates properties of a column family. returns the new schema id.
+
+        Parameters:
+         - cf_def
+        """
+        pass
+
+    def execute_cql_query(self, query, compression):
+        """
+        @deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead.
+
+        Parameters:
+         - query
+         - compression
+        """
+        pass
+
+    def execute_cql3_query(self, query, compression, consistency):
+        """
+        Executes a CQL3 (Cassandra Query Language) statement and returns a
+        CqlResult containing the results.
+
+        Parameters:
+         - query
+         - compression
+         - consistency
+        """
+        pass
+
+    def prepare_cql_query(self, query, compression):
+        """
+        @deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead.
+
+        Parameters:
+         - query
+         - compression
+        """
+        pass
+
+    def prepare_cql3_query(self, query, compression):
+        """
+        Prepare a CQL3 (Cassandra Query Language) statement by compiling and returning
+        - the type of CQL statement
+        - an id token of the compiled CQL stored on the server side.
+        - a count of the discovered bound markers in the statement
+
+        Parameters:
+         - query
+         - compression
+        """
+        pass
+
+    def execute_prepared_cql_query(self, itemId, values):
+        """
+        @deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead.
+
+        Parameters:
+         - itemId
+         - values
+        """
+        pass
+
+    def execute_prepared_cql3_query(self, itemId, values, consistency):
+        """
+        Executes a prepared CQL3 (Cassandra Query Language) statement by passing an id token, a list of variables
+        to bind, and the consistency level, and returns a CqlResult containing the results.
+
+        Parameters:
+         - itemId
+         - values
+         - consistency
+        """
+        pass
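+
+    # Illustrative sketch of the prepare/execute round trip (assumes the
+    # Compression enum from these bindings and that the prepared result
+    # exposes the id token as itemId):
+    #
+    #   prepared = client.prepare_cql3_query(
+    #       b'SELECT * FROM ks.t WHERE k = ?', Compression.NONE)
+    #   rows = client.execute_prepared_cql3_query(prepared.itemId, [k_bytes],
+    #                                             ConsistencyLevel.ONE)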
+
+    def set_cql_version(self, version):
+        """
+        @deprecated This is now a no-op. Please use the CQL3 specific methods instead.
+
+        Parameters:
+         - version
+        """
+        pass
+
+
+class Client(Iface):
+    def __init__(self, iprot, oprot=None):
+        self._iprot = self._oprot = iprot
+        if oprot is not None:
+            self._oprot = oprot
+        self._seqid = 0
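+
+    # Illustrative sketch (not part of the generated code): a Client is
+    # normally wrapped around a framed Thrift transport on the rpc port,
+    # 9160 by default, e.g.
+    #
+    #   from thrift.transport import TSocket, TTransport
+    #   from thrift.protocol import TBinaryProtocol
+    #   transport = TTransport.TFramedTransport(
+    #       TSocket.TSocket('127.0.0.1', 9160))
+    #   client = Client(TBinaryProtocol.TBinaryProtocolAccelerated(transport))
+    #   transport.open()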
+
+    def login(self, auth_request):
+        """
+        Parameters:
+         - auth_request
+        """
+        self.send_login(auth_request)
+        self.recv_login()
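+
+    # Illustrative sketch (credentials are examples only): login takes an
+    # AuthenticationRequest carrying a credentials map, e.g.
+    #
+    #   client.login(AuthenticationRequest(
+    #       credentials={'username': 'cassandra', 'password': 'cassandra'}))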
+
+    def send_login(self, auth_request):
+        self._oprot.writeMessageBegin('login', TMessageType.CALL, self._seqid)
+        args = login_args()
+        args.auth_request = auth_request
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_login(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = login_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.authnx is not None:
+            raise result.authnx
+        if result.authzx is not None:
+            raise result.authzx
+        return
+
+    def set_keyspace(self, keyspace):
+        """
+        Parameters:
+         - keyspace
+        """
+        self.send_set_keyspace(keyspace)
+        self.recv_set_keyspace()
+
+    def send_set_keyspace(self, keyspace):
+        self._oprot.writeMessageBegin('set_keyspace', TMessageType.CALL, self._seqid)
+        args = set_keyspace_args()
+        args.keyspace = keyspace
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_set_keyspace(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = set_keyspace_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.ire is not None:
+            raise result.ire
+        return
+
+    def get(self, key, column_path, consistency_level):
+        """
+        Get the Column or SuperColumn at the given column_path. If no value is present, NotFoundException is thrown. (This is
+        the only method that can throw an exception under non-failure conditions.)
+
+        Parameters:
+         - key
+         - column_path
+         - consistency_level
+        """
+        self.send_get(key, column_path, consistency_level)
+        return self.recv_get()
+
+    def send_get(self, key, column_path, consistency_level):
+        self._oprot.writeMessageBegin('get', TMessageType.CALL, self._seqid)
+        args = get_args()
+        args.key = key
+        args.column_path = column_path
+        args.consistency_level = consistency_level
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        if result.nfe is not None:
+            raise result.nfe
+        if result.ue is not None:
+            raise result.ue
+        if result.te is not None:
+            raise result.te
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "get failed: unknown result")
+
+    def get_slice(self, key, column_parent, predicate, consistency_level):
+        """
+        Get the group of columns contained by column_parent (either a ColumnFamily name or a ColumnFamily/SuperColumn name
+        pair) specified by the given SlicePredicate. If no matching values are found, an empty list is returned.
+
+        Parameters:
+         - key
+         - column_parent
+         - predicate
+         - consistency_level
+        """
+        self.send_get_slice(key, column_parent, predicate, consistency_level)
+        return self.recv_get_slice()
+
+    def send_get_slice(self, key, column_parent, predicate, consistency_level):
+        self._oprot.writeMessageBegin('get_slice', TMessageType.CALL, self._seqid)
+        args = get_slice_args()
+        args.key = key
+        args.column_parent = column_parent
+        args.predicate = predicate
+        args.consistency_level = consistency_level
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get_slice(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_slice_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        if result.ue is not None:
+            raise result.ue
+        if result.te is not None:
+            raise result.te
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_slice failed: unknown result")
+
+    def get_count(self, key, column_parent, predicate, consistency_level):
+        """
+        returns the number of columns matching <code>predicate</code> for a particular <code>key</code>,
+        <code>ColumnFamily</code> and optionally <code>SuperColumn</code>.
+
+        Parameters:
+         - key
+         - column_parent
+         - predicate
+         - consistency_level
+        """
+        self.send_get_count(key, column_parent, predicate, consistency_level)
+        return self.recv_get_count()
+
+    def send_get_count(self, key, column_parent, predicate, consistency_level):
+        self._oprot.writeMessageBegin('get_count', TMessageType.CALL, self._seqid)
+        args = get_count_args()
+        args.key = key
+        args.column_parent = column_parent
+        args.predicate = predicate
+        args.consistency_level = consistency_level
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get_count(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_count_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        if result.ue is not None:
+            raise result.ue
+        if result.te is not None:
+            raise result.te
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_count failed: unknown result")
+
+    def multiget_slice(self, keys, column_parent, predicate, consistency_level):
+        """
+        Performs a get_slice for column_parent and predicate for the given keys in parallel.
+
+        Parameters:
+         - keys
+         - column_parent
+         - predicate
+         - consistency_level
+        """
+        self.send_multiget_slice(keys, column_parent, predicate, consistency_level)
+        return self.recv_multiget_slice()
+
+    def send_multiget_slice(self, keys, column_parent, predicate, consistency_level):
+        self._oprot.writeMessageBegin('multiget_slice', TMessageType.CALL, self._seqid)
+        args = multiget_slice_args()
+        args.keys = keys
+        args.column_parent = column_parent
+        args.predicate = predicate
+        args.consistency_level = consistency_level
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_multiget_slice(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = multiget_slice_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        if result.ue is not None:
+            raise result.ue
+        if result.te is not None:
+            raise result.te
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "multiget_slice failed: unknown result")
+
+    def multiget_count(self, keys, column_parent, predicate, consistency_level):
+        """
+        Perform a get_count in parallel on the given list<binary> keys. The return value maps keys to the count found.
+
+        Parameters:
+         - keys
+         - column_parent
+         - predicate
+         - consistency_level
+        """
+        self.send_multiget_count(keys, column_parent, predicate, consistency_level)
+        return self.recv_multiget_count()
+
+    def send_multiget_count(self, keys, column_parent, predicate, consistency_level):
+        self._oprot.writeMessageBegin('multiget_count', TMessageType.CALL, self._seqid)
+        args = multiget_count_args()
+        args.keys = keys
+        args.column_parent = column_parent
+        args.predicate = predicate
+        args.consistency_level = consistency_level
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_multiget_count(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = multiget_count_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        if result.ue is not None:
+            raise result.ue
+        if result.te is not None:
+            raise result.te
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "multiget_count failed: unknown result")
+
+    def get_range_slices(self, column_parent, predicate, range, consistency_level):
+        """
+        returns a subset of columns for a contiguous range of keys.
+
+        Parameters:
+         - column_parent
+         - predicate
+         - range
+         - consistency_level
+        """
+        self.send_get_range_slices(column_parent, predicate, range, consistency_level)
+        return self.recv_get_range_slices()
+
+    def send_get_range_slices(self, column_parent, predicate, range, consistency_level):
+        self._oprot.writeMessageBegin('get_range_slices', TMessageType.CALL, self._seqid)
+        args = get_range_slices_args()
+        args.column_parent = column_parent
+        args.predicate = predicate
+        args.range = range
+        args.consistency_level = consistency_level
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get_range_slices(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_range_slices_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        if result.ue is not None:
+            raise result.ue
+        if result.te is not None:
+            raise result.te
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_range_slices failed: unknown result")
+
+    def get_paged_slice(self, column_family, range, start_column, consistency_level):
+        """
+        returns a range of columns, wrapping to the next rows if necessary to collect max_results.
+
+        Parameters:
+         - column_family
+         - range
+         - start_column
+         - consistency_level
+        """
+        self.send_get_paged_slice(column_family, range, start_column, consistency_level)
+        return self.recv_get_paged_slice()
+
+    def send_get_paged_slice(self, column_family, range, start_column, consistency_level):
+        self._oprot.writeMessageBegin('get_paged_slice', TMessageType.CALL, self._seqid)
+        args = get_paged_slice_args()
+        args.column_family = column_family
+        args.range = range
+        args.start_column = start_column
+        args.consistency_level = consistency_level
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get_paged_slice(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_paged_slice_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        if result.ue is not None:
+            raise result.ue
+        if result.te is not None:
+            raise result.te
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_paged_slice failed: unknown result")
+
+    def get_indexed_slices(self, column_parent, index_clause, column_predicate, consistency_level):
+        """
+        Returns the subset of columns specified in SlicePredicate for the rows matching the IndexClause
+        @deprecated use get_range_slices instead with range.row_filter specified
+
+        Parameters:
+         - column_parent
+         - index_clause
+         - column_predicate
+         - consistency_level
+        """
+        self.send_get_indexed_slices(column_parent, index_clause, column_predicate, consistency_level)
+        return self.recv_get_indexed_slices()
+
+    def send_get_indexed_slices(self, column_parent, index_clause, column_predicate, consistency_level):
+        self._oprot.writeMessageBegin('get_indexed_slices', TMessageType.CALL, self._seqid)
+        args = get_indexed_slices_args()
+        args.column_parent = column_parent
+        args.index_clause = index_clause
+        args.column_predicate = column_predicate
+        args.consistency_level = consistency_level
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get_indexed_slices(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_indexed_slices_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        if result.ue is not None:
+            raise result.ue
+        if result.te is not None:
+            raise result.te
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_indexed_slices failed: unknown result")
+
+    def insert(self, key, column_parent, column, consistency_level):
+        """
+        Insert a Column at the given column_parent.column_family and optional column_parent.super_column.
+
+        Parameters:
+         - key
+         - column_parent
+         - column
+         - consistency_level
+        """
+        self.send_insert(key, column_parent, column, consistency_level)
+        self.recv_insert()
+
+    def send_insert(self, key, column_parent, column, consistency_level):
+        self._oprot.writeMessageBegin('insert', TMessageType.CALL, self._seqid)
+        args = insert_args()
+        args.key = key
+        args.column_parent = column_parent
+        args.column = column
+        args.consistency_level = consistency_level
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_insert(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = insert_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.ire is not None:
+            raise result.ire
+        if result.ue is not None:
+            raise result.ue
+        if result.te is not None:
+            raise result.te
+        return
+
+    def add(self, key, column_parent, column, consistency_level):
+        """
+        Increment or decrement a counter.
+
+        Parameters:
+         - key
+         - column_parent
+         - column
+         - consistency_level
+        """
+        self.send_add(key, column_parent, column, consistency_level)
+        self.recv_add()
+
+    def send_add(self, key, column_parent, column, consistency_level):
+        self._oprot.writeMessageBegin('add', TMessageType.CALL, self._seqid)
+        args = add_args()
+        args.key = key
+        args.column_parent = column_parent
+        args.column = column
+        args.consistency_level = consistency_level
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_add(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = add_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.ire is not None:
+            raise result.ire
+        if result.ue is not None:
+            raise result.ue
+        if result.te is not None:
+            raise result.te
+        return
+
+    def cas(self, key, column_family, expected, updates, serial_consistency_level, commit_consistency_level):
+        """
+        Atomic compare and set.
+
+        If the cas is successful, the success boolean in CASResult will be true and there will be no current_values.
+        Otherwise, success will be false and current_values will contain the current values for the columns in
+        expected (which, by definition of compare-and-set, will differ from the values in expected).
+
+        A cas operation takes two consistency levels. The first one, serial_consistency_level, simply indicates the
+        level of serialization required. This can be either ConsistencyLevel.SERIAL or ConsistencyLevel.LOCAL_SERIAL.
+        The second one, commit_consistency_level, defines the consistency level for the commit phase of the cas. This
+        is a more traditional consistency level (the same CLs as for traditional writes are accepted) that impacts
+        the visibility of the operation for reads. For instance, if commit_consistency_level is QUORUM, then it is
+        guaranteed that a follow-up QUORUM read will see the cas write (provided the cas was successful). If
+        commit_consistency_level is ANY, you will need to use a SERIAL/LOCAL_SERIAL read to be guaranteed to see
+        the write.
+
+        Parameters:
+         - key
+         - column_family
+         - expected
+         - updates
+         - serial_consistency_level
+         - commit_consistency_level
+        """
+        self.send_cas(key, column_family, expected, updates, serial_consistency_level, commit_consistency_level)
+        return self.recv_cas()
+
+    def send_cas(self, key, column_family, expected, updates, serial_consistency_level, commit_consistency_level):
+        self._oprot.writeMessageBegin('cas', TMessageType.CALL, self._seqid)
+        args = cas_args()
+        args.key = key
+        args.column_family = column_family
+        args.expected = expected
+        args.updates = updates
+        args.serial_consistency_level = serial_consistency_level
+        args.commit_consistency_level = commit_consistency_level
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_cas(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = cas_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        if result.ue is not None:
+            raise result.ue
+        if result.te is not None:
+            raise result.te
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "cas failed: unknown result")
+
+    def remove(self, key, column_path, timestamp, consistency_level):
+        """
+        Remove data from the row specified by key at the granularity specified by column_path, and the given timestamp. Note
+        that all the values in column_path besides column_path.column_family are truly optional: you can remove the entire
+        row by just specifying the ColumnFamily, or you can remove a SuperColumn or a single Column by specifying those levels too.
+
+        Parameters:
+         - key
+         - column_path
+         - timestamp
+         - consistency_level
+        """
+        self.send_remove(key, column_path, timestamp, consistency_level)
+        self.recv_remove()
+
+    def send_remove(self, key, column_path, timestamp, consistency_level):
+        self._oprot.writeMessageBegin('remove', TMessageType.CALL, self._seqid)
+        args = remove_args()
+        args.key = key
+        args.column_path = column_path
+        args.timestamp = timestamp
+        args.consistency_level = consistency_level
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_remove(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = remove_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.ire is not None:
+            raise result.ire
+        if result.ue is not None:
+            raise result.ue
+        if result.te is not None:
+            raise result.te
+        return
+
+    def remove_counter(self, key, path, consistency_level):
+        """
+        Remove a counter at the specified location.
+        Note that counters have limited support for deletes: if you remove a counter, you must wait to issue any following update
+        until the delete has reached all the nodes and all of them have been fully compacted.
+
+        Parameters:
+         - key
+         - path
+         - consistency_level
+        """
+        self.send_remove_counter(key, path, consistency_level)
+        self.recv_remove_counter()
+
+    def send_remove_counter(self, key, path, consistency_level):
+        self._oprot.writeMessageBegin('remove_counter', TMessageType.CALL, self._seqid)
+        args = remove_counter_args()
+        args.key = key
+        args.path = path
+        args.consistency_level = consistency_level
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_remove_counter(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = remove_counter_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.ire is not None:
+            raise result.ire
+        if result.ue is not None:
+            raise result.ue
+        if result.te is not None:
+            raise result.te
+        return
+
+    def batch_mutate(self, mutation_map, consistency_level):
+        """
+        Mutate many columns or super columns for many row keys. See also: Mutation.
+
+        mutation_map maps key to column family to a list of Mutation objects to take place at that scope.
+
+        Parameters:
+         - mutation_map
+         - consistency_level
+        """
+        self.send_batch_mutate(mutation_map, consistency_level)
+        self.recv_batch_mutate()
+
+    def send_batch_mutate(self, mutation_map, consistency_level):
+        self._oprot.writeMessageBegin('batch_mutate', TMessageType.CALL, self._seqid)
+        args = batch_mutate_args()
+        args.mutation_map = mutation_map
+        args.consistency_level = consistency_level
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_batch_mutate(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = batch_mutate_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.ire is not None:
+            raise result.ire
+        if result.ue is not None:
+            raise result.ue
+        if result.te is not None:
+            raise result.te
+        return
+
+    def atomic_batch_mutate(self, mutation_map, consistency_level):
+        """
+        Atomically mutate many columns or super columns for many row keys. See also: Mutation.
+
+        mutation_map maps key to column family to a list of Mutation objects to take place at that scope.
+
+        Parameters:
+         - mutation_map
+         - consistency_level
+        """
+        self.send_atomic_batch_mutate(mutation_map, consistency_level)
+        self.recv_atomic_batch_mutate()
+
+    def send_atomic_batch_mutate(self, mutation_map, consistency_level):
+        self._oprot.writeMessageBegin('atomic_batch_mutate', TMessageType.CALL, self._seqid)
+        args = atomic_batch_mutate_args()
+        args.mutation_map = mutation_map
+        args.consistency_level = consistency_level
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_atomic_batch_mutate(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = atomic_batch_mutate_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.ire is not None:
+            raise result.ire
+        if result.ue is not None:
+            raise result.ue
+        if result.te is not None:
+            raise result.te
+        return
+
+    def truncate(self, cfname):
+        """
+        Truncate will mark an entire column family as deleted.
+        From the user's perspective a successful call to truncate will result in complete data deletion from cfname.
+        Internally, however, disk space will not be immediately released; as with all deletes in Cassandra, this one
+        only marks the data as deleted.
+        The operation succeeds only if all hosts in the cluster are available, and will throw an UnavailableException
+        if some hosts are down.
+
+        Parameters:
+         - cfname
+        """
+        self.send_truncate(cfname)
+        self.recv_truncate()
+
+    def send_truncate(self, cfname):
+        self._oprot.writeMessageBegin('truncate', TMessageType.CALL, self._seqid)
+        args = truncate_args()
+        args.cfname = cfname
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_truncate(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = truncate_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.ire is not None:
+            raise result.ire
+        if result.ue is not None:
+            raise result.ue
+        if result.te is not None:
+            raise result.te
+        return
+
+    def get_multi_slice(self, request):
+        """
+        Select multiple slices of a key in a single RPC operation
+
+        Parameters:
+         - request
+        """
+        self.send_get_multi_slice(request)
+        return self.recv_get_multi_slice()
+
+    def send_get_multi_slice(self, request):
+        self._oprot.writeMessageBegin('get_multi_slice', TMessageType.CALL, self._seqid)
+        args = get_multi_slice_args()
+        args.request = request
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get_multi_slice(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_multi_slice_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        if result.ue is not None:
+            raise result.ue
+        if result.te is not None:
+            raise result.te
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_multi_slice failed: unknown result")
+
+    def describe_schema_versions(self):
+        """
+        for each schema version present in the cluster, returns a list of nodes at that version.
+        hosts that do not respond will be under the key DatabaseDescriptor.INITIAL_VERSION.
+        the cluster is all on the same version if the size of the map is 1.
+        """
+        self.send_describe_schema_versions()
+        return self.recv_describe_schema_versions()
+
+    def send_describe_schema_versions(self):
+        self._oprot.writeMessageBegin('describe_schema_versions', TMessageType.CALL, self._seqid)
+        args = describe_schema_versions_args()
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_describe_schema_versions(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = describe_schema_versions_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_schema_versions failed: unknown result")
+
+    def describe_keyspaces(self):
+        """
+        list the defined keyspaces in this cluster
+        """
+        self.send_describe_keyspaces()
+        return self.recv_describe_keyspaces()
+
+    def send_describe_keyspaces(self):
+        self._oprot.writeMessageBegin('describe_keyspaces', TMessageType.CALL, self._seqid)
+        args = describe_keyspaces_args()
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_describe_keyspaces(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = describe_keyspaces_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_keyspaces failed: unknown result")
+
+    def describe_cluster_name(self):
+        """
+        get the cluster name
+        """
+        self.send_describe_cluster_name()
+        return self.recv_describe_cluster_name()
+
+    def send_describe_cluster_name(self):
+        self._oprot.writeMessageBegin('describe_cluster_name', TMessageType.CALL, self._seqid)
+        args = describe_cluster_name_args()
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_describe_cluster_name(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = describe_cluster_name_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_cluster_name failed: unknown result")
+
+    def describe_version(self):
+        """
+        get the thrift api version
+        """
+        self.send_describe_version()
+        return self.recv_describe_version()
+
+    def send_describe_version(self):
+        self._oprot.writeMessageBegin('describe_version', TMessageType.CALL, self._seqid)
+        args = describe_version_args()
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_describe_version(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = describe_version_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_version failed: unknown result")
+
+    def describe_ring(self, keyspace):
+        """
+        get the token ring: a map of ranges to host addresses,
+        represented as a set of TokenRange instead of a map from range
+        to list of endpoints, because you can't use Thrift structs as
+        map keys:
+        https://issues.apache.org/jira/browse/THRIFT-162
+
+        for the same reason, we can't return a set here, even though
+        order is neither important nor predictable.
+
+        Parameters:
+         - keyspace
+        """
+        self.send_describe_ring(keyspace)
+        return self.recv_describe_ring()
+
+    def send_describe_ring(self, keyspace):
+        self._oprot.writeMessageBegin('describe_ring', TMessageType.CALL, self._seqid)
+        args = describe_ring_args()
+        args.keyspace = keyspace
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_describe_ring(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = describe_ring_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_ring failed: unknown result")
+
+    def describe_local_ring(self, keyspace):
+        """
+        same as describe_ring, but considers only nodes in the local DC
+
+        Parameters:
+         - keyspace
+        """
+        self.send_describe_local_ring(keyspace)
+        return self.recv_describe_local_ring()
+
+    def send_describe_local_ring(self, keyspace):
+        self._oprot.writeMessageBegin('describe_local_ring', TMessageType.CALL, self._seqid)
+        args = describe_local_ring_args()
+        args.keyspace = keyspace
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_describe_local_ring(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = describe_local_ring_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_local_ring failed: unknown result")
+
+    def describe_token_map(self):
+        """
+        get the mapping between token->node ip
+        without taking replication into consideration
+        https://issues.apache.org/jira/browse/CASSANDRA-4092
+        """
+        self.send_describe_token_map()
+        return self.recv_describe_token_map()
+
+    def send_describe_token_map(self):
+        self._oprot.writeMessageBegin('describe_token_map', TMessageType.CALL, self._seqid)
+        args = describe_token_map_args()
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_describe_token_map(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = describe_token_map_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_token_map failed: unknown result")
+
+    def describe_partitioner(self):
+        """
+        returns the partitioner used by this cluster
+        """
+        self.send_describe_partitioner()
+        return self.recv_describe_partitioner()
+
+    def send_describe_partitioner(self):
+        self._oprot.writeMessageBegin('describe_partitioner', TMessageType.CALL, self._seqid)
+        args = describe_partitioner_args()
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_describe_partitioner(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = describe_partitioner_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_partitioner failed: unknown result")
+
+    def describe_snitch(self):
+        """
+        returns the snitch used by this cluster
+        """
+        self.send_describe_snitch()
+        return self.recv_describe_snitch()
+
+    def send_describe_snitch(self):
+        self._oprot.writeMessageBegin('describe_snitch', TMessageType.CALL, self._seqid)
+        args = describe_snitch_args()
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_describe_snitch(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = describe_snitch_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_snitch failed: unknown result")
+
+    def describe_keyspace(self, keyspace):
+        """
+        describe specified keyspace
+
+        Parameters:
+         - keyspace
+        """
+        self.send_describe_keyspace(keyspace)
+        return self.recv_describe_keyspace()
+
+    def send_describe_keyspace(self, keyspace):
+        self._oprot.writeMessageBegin('describe_keyspace', TMessageType.CALL, self._seqid)
+        args = describe_keyspace_args()
+        args.keyspace = keyspace
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_describe_keyspace(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = describe_keyspace_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.nfe is not None:
+            raise result.nfe
+        if result.ire is not None:
+            raise result.ire
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_keyspace failed: unknown result")
+
+    def describe_splits(self, cfName, start_token, end_token, keys_per_split):
+        """
+        experimental API for hadoop/parallel query support.
+        may change violently and without warning.
+
+        returns list of token strings such that first subrange is (list[0], list[1]],
+        next is (list[1], list[2]], etc.
+
+        Parameters:
+         - cfName
+         - start_token
+         - end_token
+         - keys_per_split
+        """
+        self.send_describe_splits(cfName, start_token, end_token, keys_per_split)
+        return self.recv_describe_splits()
+
+    def send_describe_splits(self, cfName, start_token, end_token, keys_per_split):
+        self._oprot.writeMessageBegin('describe_splits', TMessageType.CALL, self._seqid)
+        args = describe_splits_args()
+        args.cfName = cfName
+        args.start_token = start_token
+        args.end_token = end_token
+        args.keys_per_split = keys_per_split
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_describe_splits(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = describe_splits_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_splits failed: unknown result")
+
+    def trace_next_query(self):
+        """
+        Enables tracing for the next query in this connection and returns the UUID for that trace session.
+        The next query will be traced independently of the trace probability, and the returned UUID can be used to query the trace keyspace.
+        """
+        self.send_trace_next_query()
+        return self.recv_trace_next_query()
+
+    def send_trace_next_query(self):
+        self._oprot.writeMessageBegin('trace_next_query', TMessageType.CALL, self._seqid)
+        args = trace_next_query_args()
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_trace_next_query(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = trace_next_query_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "trace_next_query failed: unknown result")
+
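+    # A minimal usage sketch (illustrative only; assumes a connected client):
+    #
+    #     session_id = client.trace_next_query()
+    #     client.execute_cql3_query('SELECT * FROM ks.t', Compression.NONE,
+    #                               ConsistencyLevel.ONE)
+    #     # session_id keys the trace rows written to the system_traces keyspace
+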
+    def describe_splits_ex(self, cfName, start_token, end_token, keys_per_split):
+        """
+        Parameters:
+         - cfName
+         - start_token
+         - end_token
+         - keys_per_split
+        """
+        self.send_describe_splits_ex(cfName, start_token, end_token, keys_per_split)
+        return self.recv_describe_splits_ex()
+
+    def send_describe_splits_ex(self, cfName, start_token, end_token, keys_per_split):
+        self._oprot.writeMessageBegin('describe_splits_ex', TMessageType.CALL, self._seqid)
+        args = describe_splits_ex_args()
+        args.cfName = cfName
+        args.start_token = start_token
+        args.end_token = end_token
+        args.keys_per_split = keys_per_split
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_describe_splits_ex(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = describe_splits_ex_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_splits_ex failed: unknown result")
+
+    def system_add_column_family(self, cf_def):
+        """
+        adds a column family. returns the new schema id.
+
+        Parameters:
+         - cf_def
+        """
+        self.send_system_add_column_family(cf_def)
+        return self.recv_system_add_column_family()
+
+    def send_system_add_column_family(self, cf_def):
+        self._oprot.writeMessageBegin('system_add_column_family', TMessageType.CALL, self._seqid)
+        args = system_add_column_family_args()
+        args.cf_def = cf_def
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_system_add_column_family(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = system_add_column_family_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        if result.sde is not None:
+            raise result.sde
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "system_add_column_family failed: unknown result")
+
+    def system_drop_column_family(self, column_family):
+        """
+        drops a column family. returns the new schema id.
+
+        Parameters:
+         - column_family
+        """
+        self.send_system_drop_column_family(column_family)
+        return self.recv_system_drop_column_family()
+
+    def send_system_drop_column_family(self, column_family):
+        self._oprot.writeMessageBegin('system_drop_column_family', TMessageType.CALL, self._seqid)
+        args = system_drop_column_family_args()
+        args.column_family = column_family
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_system_drop_column_family(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = system_drop_column_family_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        if result.sde is not None:
+            raise result.sde
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "system_drop_column_family failed: unknown result")
+
+    def system_add_keyspace(self, ks_def):
+        """
+        adds a keyspace and any column families that are part of it. returns the new schema id.
+
+        Parameters:
+         - ks_def
+        """
+        self.send_system_add_keyspace(ks_def)
+        return self.recv_system_add_keyspace()
+
+    def send_system_add_keyspace(self, ks_def):
+        self._oprot.writeMessageBegin('system_add_keyspace', TMessageType.CALL, self._seqid)
+        args = system_add_keyspace_args()
+        args.ks_def = ks_def
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_system_add_keyspace(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = system_add_keyspace_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        if result.sde is not None:
+            raise result.sde
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "system_add_keyspace failed: unknown result")
+
+    def system_drop_keyspace(self, keyspace):
+        """
+        drops a keyspace and any column families that are part of it. returns the new schema id.
+
+        Parameters:
+         - keyspace
+        """
+        self.send_system_drop_keyspace(keyspace)
+        return self.recv_system_drop_keyspace()
+
+    def send_system_drop_keyspace(self, keyspace):
+        self._oprot.writeMessageBegin('system_drop_keyspace', TMessageType.CALL, self._seqid)
+        args = system_drop_keyspace_args()
+        args.keyspace = keyspace
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_system_drop_keyspace(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = system_drop_keyspace_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        if result.sde is not None:
+            raise result.sde
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "system_drop_keyspace failed: unknown result")
+
+    def system_update_keyspace(self, ks_def):
+        """
+        updates properties of a keyspace. returns the new schema id.
+
+        Parameters:
+         - ks_def
+        """
+        self.send_system_update_keyspace(ks_def)
+        return self.recv_system_update_keyspace()
+
+    def send_system_update_keyspace(self, ks_def):
+        self._oprot.writeMessageBegin('system_update_keyspace', TMessageType.CALL, self._seqid)
+        args = system_update_keyspace_args()
+        args.ks_def = ks_def
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_system_update_keyspace(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = system_update_keyspace_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        if result.sde is not None:
+            raise result.sde
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "system_update_keyspace failed: unknown result")
+
+    def system_update_column_family(self, cf_def):
+        """
+        updates properties of a column family. returns the new schema id.
+
+        Parameters:
+         - cf_def
+        """
+        self.send_system_update_column_family(cf_def)
+        return self.recv_system_update_column_family()
+
+    def send_system_update_column_family(self, cf_def):
+        self._oprot.writeMessageBegin('system_update_column_family', TMessageType.CALL, self._seqid)
+        args = system_update_column_family_args()
+        args.cf_def = cf_def
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_system_update_column_family(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = system_update_column_family_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        if result.sde is not None:
+            raise result.sde
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "system_update_column_family failed: unknown result")
+
+    def execute_cql_query(self, query, compression):
+        """
+        @deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead.
+
+        Parameters:
+         - query
+         - compression
+        """
+        self.send_execute_cql_query(query, compression)
+        return self.recv_execute_cql_query()
+
+    def send_execute_cql_query(self, query, compression):
+        self._oprot.writeMessageBegin('execute_cql_query', TMessageType.CALL, self._seqid)
+        args = execute_cql_query_args()
+        args.query = query
+        args.compression = compression
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_execute_cql_query(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = execute_cql_query_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        if result.ue is not None:
+            raise result.ue
+        if result.te is not None:
+            raise result.te
+        if result.sde is not None:
+            raise result.sde
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "execute_cql_query failed: unknown result")
+
+    def execute_cql3_query(self, query, compression, consistency):
+        """
+        Executes a CQL3 (Cassandra Query Language) statement and returns a
+        CqlResult containing the results.
+
+        Parameters:
+         - query
+         - compression
+         - consistency
+        """
+        self.send_execute_cql3_query(query, compression, consistency)
+        return self.recv_execute_cql3_query()
+
+    def send_execute_cql3_query(self, query, compression, consistency):
+        self._oprot.writeMessageBegin('execute_cql3_query', TMessageType.CALL, self._seqid)
+        args = execute_cql3_query_args()
+        args.query = query
+        args.compression = compression
+        args.consistency = consistency
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_execute_cql3_query(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = execute_cql3_query_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        if result.ue is not None:
+            raise result.ue
+        if result.te is not None:
+            raise result.te
+        if result.sde is not None:
+            raise result.sde
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "execute_cql3_query failed: unknown result")
+
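+    # A minimal usage sketch (illustrative only; assumes a connected client):
+    #
+    #     result = client.execute_cql3_query("SELECT * FROM ks.t WHERE k = 'k1'",
+    #                                        Compression.NONE, ConsistencyLevel.ONE)
+    #     # result is a CqlResult; for ROWS results, result.rows holds CqlRow objects
+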
+    def prepare_cql_query(self, query, compression):
+        """
+        @deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead.
+
+        Parameters:
+         - query
+         - compression
+        """
+        self.send_prepare_cql_query(query, compression)
+        return self.recv_prepare_cql_query()
+
+    def send_prepare_cql_query(self, query, compression):
+        self._oprot.writeMessageBegin('prepare_cql_query', TMessageType.CALL, self._seqid)
+        args = prepare_cql_query_args()
+        args.query = query
+        args.compression = compression
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_prepare_cql_query(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = prepare_cql_query_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "prepare_cql_query failed: unknown result")
+
+    def prepare_cql3_query(self, query, compression):
+        """
+        Prepare a CQL3 (Cassandra Query Language) statement by compiling and returning
+        - the type of CQL statement
+        - an id token of the compiled CQL stored on the server side
+        - a count of the discovered bound markers in the statement
+
+        Parameters:
+         - query
+         - compression
+        """
+        self.send_prepare_cql3_query(query, compression)
+        return self.recv_prepare_cql3_query()
+
+    def send_prepare_cql3_query(self, query, compression):
+        self._oprot.writeMessageBegin('prepare_cql3_query', TMessageType.CALL, self._seqid)
+        args = prepare_cql3_query_args()
+        args.query = query
+        args.compression = compression
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_prepare_cql3_query(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = prepare_cql3_query_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "prepare_cql3_query failed: unknown result")
+
+    def execute_prepared_cql_query(self, itemId, values):
+        """
+        @deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead.
+
+        Parameters:
+         - itemId
+         - values
+        """
+        self.send_execute_prepared_cql_query(itemId, values)
+        return self.recv_execute_prepared_cql_query()
+
+    def send_execute_prepared_cql_query(self, itemId, values):
+        self._oprot.writeMessageBegin('execute_prepared_cql_query', TMessageType.CALL, self._seqid)
+        args = execute_prepared_cql_query_args()
+        args.itemId = itemId
+        args.values = values
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_execute_prepared_cql_query(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = execute_prepared_cql_query_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        if result.ue is not None:
+            raise result.ue
+        if result.te is not None:
+            raise result.te
+        if result.sde is not None:
+            raise result.sde
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "execute_prepared_cql_query failed: unknown result")
+
+    def execute_prepared_cql3_query(self, itemId, values, consistency):
+        """
+        Executes a prepared CQL3 (Cassandra Query Language) statement by passing an id token, a list of variables
+        to bind, and the consistency level, and returns a CqlResult containing the results.
+
+        Parameters:
+         - itemId
+         - values
+         - consistency
+        """
+        self.send_execute_prepared_cql3_query(itemId, values, consistency)
+        return self.recv_execute_prepared_cql3_query()
+
+    def send_execute_prepared_cql3_query(self, itemId, values, consistency):
+        self._oprot.writeMessageBegin('execute_prepared_cql3_query', TMessageType.CALL, self._seqid)
+        args = execute_prepared_cql3_query_args()
+        args.itemId = itemId
+        args.values = values
+        args.consistency = consistency
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_execute_prepared_cql3_query(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = execute_prepared_cql3_query_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.ire is not None:
+            raise result.ire
+        if result.ue is not None:
+            raise result.ue
+        if result.te is not None:
+            raise result.te
+        if result.sde is not None:
+            raise result.sde
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "execute_prepared_cql3_query failed: unknown result")
+
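+    # A minimal sketch of the prepare/execute round trip (illustrative only;
+    # assumes a connected client):
+    #
+    #     prepared = client.prepare_cql3_query('SELECT * FROM ks.t WHERE k = ?',
+    #                                          Compression.NONE)
+    #     # prepared.itemId names the server-side statement; prepared.count is
+    #     # the number of bound markers that 'values' must supply
+    #     result = client.execute_prepared_cql3_query(prepared.itemId, ['k1'],
+    #                                                 ConsistencyLevel.ONE)
+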
+    def set_cql_version(self, version):
+        """
+        @deprecated This is now a no-op. Please use the CQL3 specific methods instead.
+
+        Parameters:
+         - version
+        """
+        self.send_set_cql_version(version)
+        self.recv_set_cql_version()
+
+    def send_set_cql_version(self, version):
+        self._oprot.writeMessageBegin('set_cql_version', TMessageType.CALL, self._seqid)
+        args = set_cql_version_args()
+        args.version = version
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_set_cql_version(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = set_cql_version_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.ire is not None:
+            raise result.ire
+        return
+
+
+class Processor(Iface, TProcessor):
+    def __init__(self, handler):
+        self._handler = handler
+        self._processMap = {}
+        self._processMap["login"] = Processor.process_login
+        self._processMap["set_keyspace"] = Processor.process_set_keyspace
+        self._processMap["get"] = Processor.process_get
+        self._processMap["get_slice"] = Processor.process_get_slice
+        self._processMap["get_count"] = Processor.process_get_count
+        self._processMap["multiget_slice"] = Processor.process_multiget_slice
+        self._processMap["multiget_count"] = Processor.process_multiget_count
+        self._processMap["get_range_slices"] = Processor.process_get_range_slices
+        self._processMap["get_paged_slice"] = Processor.process_get_paged_slice
+        self._processMap["get_indexed_slices"] = Processor.process_get_indexed_slices
+        self._processMap["insert"] = Processor.process_insert
+        self._processMap["add"] = Processor.process_add
+        self._processMap["cas"] = Processor.process_cas
+        self._processMap["remove"] = Processor.process_remove
+        self._processMap["remove_counter"] = Processor.process_remove_counter
+        self._processMap["batch_mutate"] = Processor.process_batch_mutate
+        self._processMap["atomic_batch_mutate"] = Processor.process_atomic_batch_mutate
+        self._processMap["truncate"] = Processor.process_truncate
+        self._processMap["get_multi_slice"] = Processor.process_get_multi_slice
+        self._processMap["describe_schema_versions"] = Processor.process_describe_schema_versions
+        self._processMap["describe_keyspaces"] = Processor.process_describe_keyspaces
+        self._processMap["describe_cluster_name"] = Processor.process_describe_cluster_name
+        self._processMap["describe_version"] = Processor.process_describe_version
+        self._processMap["describe_ring"] = Processor.process_describe_ring
+        self._processMap["describe_local_ring"] = Processor.process_describe_local_ring
+        self._processMap["describe_token_map"] = Processor.process_describe_token_map
+        self._processMap["describe_partitioner"] = Processor.process_describe_partitioner
+        self._processMap["describe_snitch"] = Processor.process_describe_snitch
+        self._processMap["describe_keyspace"] = Processor.process_describe_keyspace
+        self._processMap["describe_splits"] = Processor.process_describe_splits
+        self._processMap["trace_next_query"] = Processor.process_trace_next_query
+        self._processMap["describe_splits_ex"] = Processor.process_describe_splits_ex
+        self._processMap["system_add_column_family"] = Processor.process_system_add_column_family
+        self._processMap["system_drop_column_family"] = Processor.process_system_drop_column_family
+        self._processMap["system_add_keyspace"] = Processor.process_system_add_keyspace
+        self._processMap["system_drop_keyspace"] = Processor.process_system_drop_keyspace
+        self._processMap["system_update_keyspace"] = Processor.process_system_update_keyspace
+        self._processMap["system_update_column_family"] = Processor.process_system_update_column_family
+        self._processMap["execute_cql_query"] = Processor.process_execute_cql_query
+        self._processMap["execute_cql3_query"] = Processor.process_execute_cql3_query
+        self._processMap["prepare_cql_query"] = Processor.process_prepare_cql_query
+        self._processMap["prepare_cql3_query"] = Processor.process_prepare_cql3_query
+        self._processMap["execute_prepared_cql_query"] = Processor.process_execute_prepared_cql_query
+        self._processMap["execute_prepared_cql3_query"] = Processor.process_execute_prepared_cql3_query
+        self._processMap["set_cql_version"] = Processor.process_set_cql_version
+
+    def process(self, iprot, oprot):
+        (name, type, seqid) = iprot.readMessageBegin()
+        if name not in self._processMap:
+            iprot.skip(TType.STRUCT)
+            iprot.readMessageEnd()
+            x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
+            oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
+            x.write(oprot)
+            oprot.writeMessageEnd()
+            oprot.trans.flush()
+            return
+        else:
+            self._processMap[name](self, seqid, iprot, oprot)
+        return True
+
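+    # A minimal serving sketch (illustrative only; assumes a 'handler' object
+    # implementing Iface):
+    #
+    #     from thrift.server import TServer
+    #     from thrift.transport import TSocket
+    #     server = TServer.TSimpleServer(Processor(handler),
+    #                                    TSocket.TServerSocket(port=9160),
+    #                                    TTransport.TFramedTransportFactory(),
+    #                                    TBinaryProtocol.TBinaryProtocolFactory())
+    #     server.serve()
+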
+    def process_login(self, seqid, iprot, oprot):
+        args = login_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = login_result()
+        try:
+            self._handler.login(args.auth_request)
+            msg_type = TMessageType.REPLY
+        except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+            raise
+        except AuthenticationException as authnx:
+            msg_type = TMessageType.REPLY
+            result.authnx = authnx
+        except AuthorizationException as authzx:
+            msg_type = TMessageType.REPLY
+            result.authzx = authzx
+        except Exception as ex:
+            msg_type = TMessageType.EXCEPTION
+            logging.exception(ex)
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+        oprot.writeMessageBegin("login", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_set_keyspace(self, seqid, iprot, oprot):
+        args = set_keyspace_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = set_keyspace_result()
+        try:
+            self._handler.set_keyspace(args.keyspace)
+            msg_type = TMessageType.REPLY
+        except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+            raise
+        except InvalidRequestException as ire:
+            msg_type = TMessageType.REPLY
+            result.ire = ire
+        except Exception as ex:
+            msg_type = TMessageType.EXCEPTION
+            logging.exception(ex)
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+        oprot.writeMessageBegin("set_keyspace", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get(self, seqid, iprot, oprot):
+        args = get_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_result()
+        try:
+            result.success = self._handler.get(args.key, args.c

<TRUNCATED>
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/thrift_bindings/thrift010/__init__.py
----------------------------------------------------------------------
diff --git a/thrift_bindings/thrift010/__init__.py b/thrift_bindings/thrift010/__init__.py
new file mode 100644
index 0000000..2132df0
--- /dev/null
+++ b/thrift_bindings/thrift010/__init__.py
@@ -0,0 +1 @@
+__all__ = ['ttypes', 'constants', 'Cassandra']



[08/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/thrift_test.py
----------------------------------------------------------------------
diff --git a/thrift_test.py b/thrift_test.py
new file mode 100644
index 0000000..23e3f7b
--- /dev/null
+++ b/thrift_test.py
@@ -0,0 +1,2649 @@
+import re
+import struct
+import time
+import uuid
+import pytest
+import logging
+
+from thrift.protocol import TBinaryProtocol
+from thrift.Thrift import TApplicationException
+from thrift.transport import TSocket, TTransport
+
+from tools.assertions import assert_length_equal
+from tools.misc import ImmutableMapping
+
+from dtest_setup_overrides import DTestSetupOverrides
+from dtest import CASSANDRA_VERSION_FROM_BUILD, Tester
+
+from thrift_bindings.thrift010 import Cassandra
+from thrift_bindings.thrift010.Cassandra import (CfDef, Column, ColumnDef,
+                                           ColumnOrSuperColumn, ColumnParent,
+                                           ColumnPath, ColumnSlice,
+                                           ConsistencyLevel, CounterColumn,
+                                           Deletion, IndexExpression,
+                                           IndexOperator, IndexType,
+                                           InvalidRequestException, KeyRange,
+                                           KeySlice, KsDef, MultiSliceRequest,
+                                           Mutation, NotFoundException,
+                                           SlicePredicate, SliceRange,
+                                           SuperColumn)
+from tools.assertions import (assert_all, assert_none, assert_one)
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
+
+def get_thrift_client(host='127.0.0.1', port=9160):
+    socket = TSocket.TSocket(host, port)
+    transport = TTransport.TFramedTransport(socket)
+    protocol = TBinaryProtocol.TBinaryProtocol(transport)
+    client = Cassandra.Client(protocol)
+    client.transport = transport
+    return client
+
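+# A minimal usage sketch (illustrative only; assumes a node listening for
+# thrift clients on 127.0.0.1:9160 and an existing 'Keyspace1'):
+#
+#     c = get_thrift_client()
+#     c.transport.open()
+#     c.set_keyspace('Keyspace1')
+#     c.transport.close()
+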
+
+client = None
+
+pid_fname = "system_test.pid"
+
+
+def pid():
+    return int(open(pid_fname).read())
+
+
+@since('2.0', max_version='4')
+class TestThrift(Tester):
+
+    @pytest.fixture(scope='function', autouse=True)
+    def fixture_dtest_setup_overrides(self):
+        """
+        @jira_ticket CASSANDRA-7653
+        """
+        dtest_setup_overrides = DTestSetupOverrides()
+        dtest_setup_overrides.cluster_options = ImmutableMapping(
+            {'partitioner': 'org.apache.cassandra.dht.ByteOrderedPartitioner',
+             'start_rpc': 'true'})
+        return dtest_setup_overrides
+
+    @pytest.fixture(scope='function', autouse=True)
+    def fixture_set_cluster_settings(self, fixture_dtest_setup):
+        fixture_dtest_setup.cluster.populate(1)
+        node1, = fixture_dtest_setup.cluster.nodelist()
+
+        # If vnodes are not used, we must set our own initial_token,
+        # because ccm will not set a hex token for ByteOrderedPartitioner
+        # automatically. It does not matter what token we set, as we only
+        # ever use one node.
+        if not self.dtest_config.use_vnodes:
+            node1.set_configuration_options(values={'initial_token': "a".encode().hex()})
+
+        fixture_dtest_setup.cluster.start(wait_for_binary_proto=True)
+        fixture_dtest_setup.cluster.nodelist()[0].watch_log_for("Listening for thrift clients")  # Wait for the thrift port to open
+        time.sleep(0.1)
+        # this is ugly, but the whole test module is written against a global client
+        global client
+        client = get_thrift_client()
+        client.transport.open()
+        self.define_schema()
+
+        yield client
+
+        client.transport.close()
+
+    def define_schema(self):
+        keyspace1 = Cassandra.KsDef('Keyspace1', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'},
+                                    cf_defs=[
+            Cassandra.CfDef('Keyspace1', 'Standard1'),
+            Cassandra.CfDef('Keyspace1', 'Standard2'),
+            Cassandra.CfDef('Keyspace1', 'Standard3', column_metadata=[Cassandra.ColumnDef('c1', 'AsciiType'), Cassandra.ColumnDef('c2', 'AsciiType')]),
+            Cassandra.CfDef('Keyspace1', 'Standard4', column_metadata=[Cassandra.ColumnDef('c1', 'AsciiType')]),
+            Cassandra.CfDef('Keyspace1', 'StandardLong1', comparator_type='LongType'),
+            Cassandra.CfDef('Keyspace1', 'StandardInteger1', comparator_type='IntegerType'),
+            Cassandra.CfDef('Keyspace1', 'StandardComposite', comparator_type='CompositeType(AsciiType, AsciiType)'),
+            Cassandra.CfDef('Keyspace1', 'Super1', column_type='Super', subcomparator_type='LongType'),
+            Cassandra.CfDef('Keyspace1', 'Super2', column_type='Super', subcomparator_type='LongType'),
+            Cassandra.CfDef('Keyspace1', 'Super3', column_type='Super', comparator_type='LongType', subcomparator_type='UTF8Type'),
+            Cassandra.CfDef('Keyspace1', 'Counter1', default_validation_class='CounterColumnType'),
+            Cassandra.CfDef('Keyspace1', 'SuperCounter1', column_type='Super', default_validation_class='CounterColumnType'),
+            Cassandra.CfDef('Keyspace1', 'Indexed1', column_metadata=[Cassandra.ColumnDef('birthdate', 'LongType', Cassandra.IndexType.KEYS, 'birthdate_index')]),
+            Cassandra.CfDef('Keyspace1', 'Indexed2', comparator_type='TimeUUIDType', column_metadata=[Cassandra.ColumnDef(uuid.UUID('00000000-0000-1000-0000-000000000000').bytes, 'LongType', Cassandra.IndexType.KEYS)]),
+            Cassandra.CfDef('Keyspace1', 'Indexed3', comparator_type='TimeUUIDType', column_metadata=[Cassandra.ColumnDef(uuid.UUID('00000000-0000-1000-0000-000000000000').bytes, 'UTF8Type', Cassandra.IndexType.KEYS)]),
+            Cassandra.CfDef('Keyspace1', 'Indexed4', column_metadata=[Cassandra.ColumnDef('a', 'LongType', Cassandra.IndexType.KEYS, 'a_index'), Cassandra.ColumnDef('z', 'UTF8Type')]),
+            Cassandra.CfDef('Keyspace1', 'Expiring', default_time_to_live=2)
+        ])
+
+        keyspace2 = Cassandra.KsDef('Keyspace2', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'},
+                                    cf_defs=[
+                                        Cassandra.CfDef('Keyspace2', 'Standard1'),
+                                        Cassandra.CfDef('Keyspace2', 'Standard3'),
+                                        Cassandra.CfDef('Keyspace2', 'Super3', column_type='Super', subcomparator_type='BytesType'),
+                                        Cassandra.CfDef('Keyspace2', 'Super4', column_type='Super', subcomparator_type='TimeUUIDType'), ])
+
+        for ks in [keyspace1, keyspace2]:
+            client.system_add_keyspace(ks)
+
+
+def i64(n):
+    return _i64(n)
+
+
+def i32(n):
+    return _i32(n)
+
+
+def i16(n):
+    return _i16(n)
+
+
+def composite(item1, item2=None, eoc='\x00'):
+    packed = _i16(len(item1)) + item1 + eoc
+    if item2 is not None:
+        packed += _i16(len(item2)) + item2
+        packed += eoc
+    return packed
+
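+# A worked example of the packing above (illustrative only): composite('a', 'b')
+# yields _i16(1) + 'a' + '\x00' + _i16(1) + 'b' + '\x00', i.e. each component is
+# a 16-bit big-endian length, the component bytes, and an end-of-component byte.
+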
+
+def _i64(n):
+    return struct.pack('>q', n)  # big endian = network order
+
+
+def _i32(n):
+    return struct.pack('>i', n)  # big endian = network order
+
+
+def _i16(n):
+    return struct.pack('>h', n)  # big endian = network order
+
+
+_SIMPLE_COLUMNS = [Column('c1', 'value1', 0),
+                   Column('c2', 'value2', 0)]
+_SUPER_COLUMNS = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
+                  SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 0),
+                                                   Column(_i64(6), 'value6', 0)])]
+
+
+def _assert_column(column_family, key, column, value, ts=0):
+    try:
+        assert client.get(key, ColumnPath(column_family, column=column), ConsistencyLevel.ONE).column == Column(column, value, ts)
+    except NotFoundException:
+        raise Exception('expected %s:%s:%s:%s, but was not present' % (column_family, key, column, value))
+
+
+def _assert_columnpath_exists(key, column_path):
+    try:
+        assert client.get(key, column_path, ConsistencyLevel.ONE)
+    except NotFoundException:
+        raise Exception('expected %s with %s but was not present.' % (key, column_path))
+
+
+def _assert_no_columnpath(key, column_path):
+    try:
+        client.get(key, column_path, ConsistencyLevel.ONE)
+        assert False, ('columnpath %s existed in %s when it should not' % (column_path, key))
+    except NotFoundException:
+        assert True, 'column did not exist'
+
+
+def _insert_simple():
+    return _insert_multi(['key1'])
+
+
+def _insert_multi(keys):
+    CL = ConsistencyLevel.ONE
+    for key in keys:
+        client.insert(key, ColumnParent('Standard1'), Column('c1', 'value1', 0), CL)
+        client.insert(key, ColumnParent('Standard1'), Column('c2', 'value2', 0), CL)
+
+
+def _insert_batch():
+    cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS],
+             'Standard2': [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]}
+    client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
+
+
+def _big_slice(key, column_parent):
+    p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
+    return client.get_slice(key, column_parent, p, ConsistencyLevel.ONE)
+
+
+def _big_multislice(keys, column_parent):
+    p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
+    return client.multiget_slice(keys, column_parent, p, ConsistencyLevel.ONE)
+
+
+def _verify_batch():
+    _verify_simple()
+    L = [result.column
+         for result in _big_slice('key1', ColumnParent('Standard2'))]
+    assert L == _SIMPLE_COLUMNS, L
+
+
+def _verify_simple():
+    assert client.get('key1', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE).column == Column('c1', 'value1', 0)
+    L = [result.column
+         for result in _big_slice('key1', ColumnParent('Standard1'))]
+    assert L == _SIMPLE_COLUMNS, L
+
+
+def _insert_super(key='key1'):
+    client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
+    client.insert(key, ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
+    client.insert(key, ColumnParent('Super1', 'sc2'), Column(_i64(6), 'value6', 0), ConsistencyLevel.ONE)
+
+
+def _insert_range():
+    client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
+    client.insert('key1', ColumnParent('Standard1'), Column('c2', 'value2', 0), ConsistencyLevel.ONE)
+    client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
+
+
+def _verify_range():
+    p = SlicePredicate(slice_range=SliceRange('c1', 'c2', False, 1000))
+    result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
+    assert len(result) == 2
+    assert result[0].column.name == 'c1'
+    assert result[1].column.name == 'c2'
+
+    p = SlicePredicate(slice_range=SliceRange('c3', 'c2', True, 1000))
+    result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
+    assert len(result) == 2
+    assert result[0].column.name == 'c3'
+    assert result[1].column.name == 'c2'
+
+    p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 1000))
+    result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
+    assert len(result) == 3, result
+
+    p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 2))
+    result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
+    assert len(result) == 2, result
+
+
+def _set_keyspace(keyspace):
+    client.set_keyspace(keyspace)
+
+
+def _insert_super_range():
+    client.insert('key1', ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
+    client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
+    client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(6), 'value6', 0), ConsistencyLevel.ONE)
+    client.insert('key1', ColumnParent('Super1', 'sc3'), Column(_i64(7), 'value7', 0), ConsistencyLevel.ONE)
+    time.sleep(0.1)
+
+
+def _verify_super_range():
+    p = SlicePredicate(slice_range=SliceRange('sc2', 'sc3', False, 2))
+    result = client.get_slice('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE)
+    assert len(result) == 2
+    assert result[0].super_column.name == 'sc2'
+    assert result[1].super_column.name == 'sc3'
+
+    p = SlicePredicate(slice_range=SliceRange('sc3', 'sc2', True, 2))
+    result = client.get_slice('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE)
+    assert len(result) == 2
+    assert result[0].super_column.name == 'sc3'
+    assert result[1].super_column.name == 'sc2'
+
+
+def _verify_super(supercf='Super1', key='key1'):
+    assert client.get(key, ColumnPath(supercf, 'sc1', _i64(4)), ConsistencyLevel.ONE).column == Column(_i64(4), 'value4', 0)
+    slice = [result.super_column
+             for result in _big_slice(key, ColumnParent(supercf))]
+    assert slice == _SUPER_COLUMNS, slice
+
+
+def _expect_exception(fn, type_):
+    try:
+        r = fn()
+    except type_ as t:
+        return t
+    else:
+        raise Exception('expected %s; got %s' % (type_.__name__, r))
+
+
+def _expect_missing(fn):
+    _expect_exception(fn, NotFoundException)
+
+
+def get_range_slice(client, parent, predicate, start, end, count, cl, row_filter=None):
+    kr = KeyRange(start, end, count=count, row_filter=row_filter)
+    return client.get_range_slices(parent, predicate, kr, cl)
+
+
+def _insert_six_columns(key='abc'):
+    CL = ConsistencyLevel.ONE
+    client.insert(key, ColumnParent('Standard1'), Column('a', '1', 0), CL)
+    client.insert(key, ColumnParent('Standard1'), Column('b', '2', 0), CL)
+    client.insert(key, ColumnParent('Standard1'), Column('c', '3', 0), CL)
+    client.insert(key, ColumnParent('Standard1'), Column('d', '4', 0), CL)
+    client.insert(key, ColumnParent('Standard1'), Column('e', '5', 0), CL)
+    client.insert(key, ColumnParent('Standard1'), Column('f', '6', 0), CL)
+
+
+def _big_multi_slice(key='abc'):
+    c1 = ColumnSlice()
+    c1.start = 'a'
+    c1.finish = 'c'
+    c2 = ColumnSlice()
+    c2.start = 'e'
+    c2.finish = 'f'
+    m = MultiSliceRequest()
+    m.key = key
+    m.column_parent = ColumnParent('Standard1')
+    m.column_slices = [c1, c2]
+    m.reversed = False
+    m.count = 10
+    m.consistency_level = ConsistencyLevel.ONE
+    return client.get_multi_slice(m)
+
+
+_MULTI_SLICE_COLUMNS = [Column('a', '1', 0), Column('b', '2', 0), Column('c', '3', 0), Column('e', '5', 0), Column('f', '6', 0)]
+
+
+@since('2.0', max_version='4')
+class TestMutations(TestThrift):
+
+    def truncate_all(self, *table_names):
+        for table in table_names:
+            client.truncate(table)
+
+    def test_insert(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1')
+        _insert_simple()
+        _verify_simple()
+
+    def test_empty_slice(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard2', 'Super1')
+        assert _big_slice('key1', ColumnParent('Standard2')) == []
+        assert _big_slice('key1', ColumnParent('Super1')) == []
+
+    def test_cas(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1', 'Standard3', 'Standard4')
+
+        def cas(expected, updates, column_family):
+            return client.cas('key1', column_family, expected, updates, ConsistencyLevel.SERIAL, ConsistencyLevel.QUORUM)
+
+        def test_cas_operations(first_columns, second_columns, column_family):
+            # partition should be empty, so cas expecting any existing values should fail
+            cas_result = cas(first_columns, first_columns, column_family)
+            assert not cas_result.success
+            assert len(cas_result.current_values) == 0, cas_result
+
+            # cas of empty columns -> first_columns should succeed
+            # and the reading back from the table should match first_columns
+            assert cas([], first_columns, column_family).success
+            result = [cosc.column for cosc in _big_slice('key1', ColumnParent(column_family))]
+            # CAS will use its own timestamp, so we can't just compare result == _SIMPLE_COLUMNS
+            assert dict((c.name, c.value) for c in result) == dict((ex.name, ex.value) for ex in first_columns)
+
+            # now that the partition has been updated, repeating the
+            # operation which expects it to be empty should not succeed
+            cas_result = cas([], first_columns, column_family)
+            assert not cas_result.success
+            # When we CAS for non-existence, current_values is the first live column of the row
+            assert dict((c.name, c.value) for c in cas_result.current_values) == {first_columns[0].name: first_columns[0].value}, cas_result
+
+            # CL.SERIAL for reads
+            assert client.get('key1', ColumnPath(column_family, column=first_columns[0].name), ConsistencyLevel.SERIAL).column.value == first_columns[0].value
+
+            # cas first_columns -> second_columns should succeed
+            assert cas(first_columns, second_columns, column_family).success
+
+            # as before, an operation with an incorrect expectation should fail
+            cas_result = cas(first_columns, second_columns, column_family)
+            assert not cas_result.success
+
+        updated_columns = [Column('c1', 'value101', 1),
+                           Column('c2', 'value102', 1)]
+
+        logger.debug("Testing CAS operations on dynamic cf")
+        test_cas_operations(_SIMPLE_COLUMNS, updated_columns, 'Standard1')
+        logger.debug("Testing CAS operations on static cf")
+        test_cas_operations(_SIMPLE_COLUMNS, updated_columns, 'Standard3')
+        logger.debug("Testing CAS on mixed static/dynamic cf")
+        test_cas_operations(_SIMPLE_COLUMNS, updated_columns, 'Standard4')
+
+    def test_missing_super(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Super1')
+
+        _expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc1', _i64(1)), ConsistencyLevel.ONE))
+        _insert_super()
+        _expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc1', _i64(1)), ConsistencyLevel.ONE))
+
+    def test_count(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1', 'Standard2', 'Super1')
+
+        _insert_simple()
+        _insert_super()
+        p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
+        assert client.get_count('key1', ColumnParent('Standard2'), p, ConsistencyLevel.ONE) == 0
+        assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 2
+        assert client.get_count('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE) == 2
+        assert client.get_count('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE) == 2
+
+        # Let's make that a little more interesting
+        client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
+        client.insert('key1', ColumnParent('Standard1'), Column('c4', 'value4', 0), ConsistencyLevel.ONE)
+        client.insert('key1', ColumnParent('Standard1'), Column('c5', 'value5', 0), ConsistencyLevel.ONE)
+
+        p = SlicePredicate(slice_range=SliceRange('c2', 'c4', False, 1000))
+        assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 3
+
+    def test_count_paging(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1')
+
+        _insert_simple()
+
+        # Exercise paging
+        column_parent = ColumnParent('Standard1')
+        # Paging for small columns starts at 1024 columns
+        columns_to_insert = [Column('c%d' % (i,), 'value%d' % (i,), 0) for i in range(3, 1026)]
+        cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(c)) for c in columns_to_insert]}
+        client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
+
+        p = SlicePredicate(slice_range=SliceRange('', '', False, 2000))
+        assert client.get_count('key1', column_parent, p, ConsistencyLevel.ONE) == 1025
+
+        # Ensure that the count limit isn't clobbered
+        p = SlicePredicate(slice_range=SliceRange('', '', False, 10))
+        assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 10
+
+    # Test that get_count() works correctly with 'count' settings around the page size (CASSANDRA-4833)
+    def test_count_around_page_size(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1')
+
+        def slice_predicate(count):
+            return SlicePredicate(slice_range=SliceRange('', '', False, count))
+
+        key = 'key1'
+        parent = ColumnParent('Standard1')
+        cl = ConsistencyLevel.ONE
+
+        for i in range(0, 3050):
+            client.insert(key, parent, Column(str(i), '', 0), cl)
+
+        # same as page size
+        assert client.get_count(key, parent, slice_predicate(1024), cl) == 1024
+
+        # 1 above page size
+        assert client.get_count(key, parent, slice_predicate(1025), cl) == 1025
+
+        # above the number of columns
+        assert client.get_count(key, parent, slice_predicate(4000), cl) == 3050
+
+        # same as number of columns
+        assert client.get_count(key, parent, slice_predicate(3050), cl) == 3050
+
+        # 1 above number of columns
+        assert client.get_count(key, parent, slice_predicate(3051), cl) == 3050
+
+    def test_super_insert(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Super1')
+
+        _insert_super()
+        _verify_super()
+
+    def test_super_get(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Super1')
+
+        _insert_super()
+        result = client.get('key1', ColumnPath('Super1', 'sc2'), ConsistencyLevel.ONE).super_column
+        assert result == _SUPER_COLUMNS[1], result
+
+    def test_super_subcolumn_limit(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Super1')
+        _insert_super()
+        p = SlicePredicate(slice_range=SliceRange('', '', False, 1))
+        column_parent = ColumnParent('Super1', 'sc2')
+        slice = [result.column
+                 for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
+        assert slice == [Column(_i64(5), 'value5', 0)], slice
+        p = SlicePredicate(slice_range=SliceRange('', '', True, 1))
+        slice = [result.column
+                 for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
+        assert slice == [Column(_i64(6), 'value6', 0)], slice
+
+    def test_long_order(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('StandardLong1')
+
+        def long_xrange(start, stop, step):
+            i = start
+            while i < stop:
+                yield i
+                i += step
+        L = []
+        for i in long_xrange(0, 104294967296, 429496729):
+            name = _i64(i)
+            client.insert('key1', ColumnParent('StandardLong1'), Column(name, 'v', 0), ConsistencyLevel.ONE)
+            L.append(name)
+        slice = [result.column.name for result in _big_slice('key1', ColumnParent('StandardLong1'))]
+        assert slice == L, slice
+
+    def test_integer_order(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('StandardInteger1')
+
+        def long_xrange(start, stop, step):
+            i = start
+            while i >= stop:
+                yield i
+                i -= step
+        L = []
+        for i in long_xrange(104294967296, 0, 429496729):
+            name = _i64(i)
+            client.insert('key1', ColumnParent('StandardInteger1'), Column(name, 'v', 0), ConsistencyLevel.ONE)
+            L.append(name)
+        slice = [result.column.name for result in _big_slice('key1', ColumnParent('StandardInteger1'))]
+        L.sort()
+        assert slice == L, slice
+
+    def test_time_uuid(self):
+        _set_keyspace('Keyspace2')
+        self.truncate_all('Super4')
+
+        import uuid
+        L = []
+
+        # 100 isn't enough to fail reliably if the comparator is borked
+        for i in range(500):
+            L.append(uuid.uuid1())
+            client.insert('key1', ColumnParent('Super4', 'sc1'), Column(L[-1].bytes, 'value%s' % i, i), ConsistencyLevel.ONE)
+        slice = _big_slice('key1', ColumnParent('Super4', 'sc1'))
+        assert len(slice) == 500, len(slice)
+        for i in range(500):
+            u = slice[i].column
+            assert u.value == 'value%s' % i
+            assert u.name == L[i].bytes
+
+        p = SlicePredicate(slice_range=SliceRange('', '', True, 1))
+        column_parent = ColumnParent('Super4', 'sc1')
+        slice = [result.column
+                 for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
+        assert slice == [Column(L[-1].bytes, 'value499', 499)], slice
+
+        p = SlicePredicate(slice_range=SliceRange('', L[2].bytes, False, 1000))
+        column_parent = ColumnParent('Super4', 'sc1')
+        slice = [result.column
+                 for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
+        assert slice == [Column(L[0].bytes, 'value0', 0),
+                         Column(L[1].bytes, 'value1', 1),
+                         Column(L[2].bytes, 'value2', 2)], slice
+
+        p = SlicePredicate(slice_range=SliceRange(L[2].bytes, '', True, 1000))
+        column_parent = ColumnParent('Super4', 'sc1')
+        slice = [result.column
+                 for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
+        assert slice == [Column(L[2].bytes, 'value2', 2),
+                         Column(L[1].bytes, 'value1', 1),
+                         Column(L[0].bytes, 'value0', 0)], slice
+
+        p = SlicePredicate(slice_range=SliceRange(L[2].bytes, '', False, 1))
+        column_parent = ColumnParent('Super4', 'sc1')
+        slice = [result.column
+                 for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
+        assert slice == [Column(L[2].bytes, 'value2', 2)], slice
+
+    def test_long_remove(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('StandardLong1')
+
+        column_parent = ColumnParent('StandardLong1')
+        sp = SlicePredicate(slice_range=SliceRange('', '', False, 1))
+        for i in range(10):
+            parent = ColumnParent('StandardLong1')
+
+            client.insert('key1', parent, Column(_i64(i), 'value1', 10 * i), ConsistencyLevel.ONE)
+            client.remove('key1', ColumnPath('StandardLong1'), 10 * i + 1, ConsistencyLevel.ONE)
+            slice = client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)
+            assert slice == [], slice
+            # resurrect
+            client.insert('key1', parent, Column(_i64(i), 'value2', 10 * i + 2), ConsistencyLevel.ONE)
+            slice = [result.column
+                     for result in client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)]
+            assert slice == [Column(_i64(i), 'value2', 10 * i + 2)], (slice, i)
+
+    def test_integer_remove(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('StandardInteger1')
+
+        column_parent = ColumnParent('StandardInteger1')
+        sp = SlicePredicate(slice_range=SliceRange('', '', False, 1))
+        for i in range(10):
+            parent = ColumnParent('StandardInteger1')
+
+            client.insert('key1', parent, Column(_i64(i), 'value1', 10 * i), ConsistencyLevel.ONE)
+            client.remove('key1', ColumnPath('StandardInteger1'), 10 * i + 1, ConsistencyLevel.ONE)
+            slice = client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)
+            assert slice == [], slice
+            # resurrect
+            client.insert('key1', parent, Column(_i64(i), 'value2', 10 * i + 2), ConsistencyLevel.ONE)
+            slice = [result.column
+                     for result in client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)]
+            assert slice == [Column(_i64(i), 'value2', 10 * i + 2)], (slice, i)
+
+    def test_batch_insert(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1', 'Standard2')
+        _insert_batch()
+        _verify_batch()
+
+    def test_batch_mutate_standard_columns(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1', 'Standard2')
+
+        column_families = ['Standard1', 'Standard2']
+        keys = ['key_%d' % i for i in range(27, 32)]
+        mutations = [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]
+        mutation_map = dict((column_family, mutations) for column_family in column_families)
+        keyed_mutations = dict((key, mutation_map) for key in keys)
+
+        client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
+
+        for column_family in column_families:
+            for key in keys:
+                _assert_column(column_family, key, 'c1', 'value1')
+
+    def test_batch_mutate_remove_standard_columns(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1', 'Standard2')
+
+        column_families = ['Standard1', 'Standard2']
+        keys = ['key_%d' % i for i in range(11, 21)]
+        _insert_multi(keys)
+
+        mutations = [Mutation(deletion=Deletion(20, predicate=SlicePredicate(column_names=[c.name]))) for c in _SIMPLE_COLUMNS]
+        mutation_map = dict((column_family, mutations) for column_family in column_families)
+
+        keyed_mutations = dict((key, mutation_map) for key in keys)
+
+        client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
+
+        for column_family in column_families:
+            for c in _SIMPLE_COLUMNS:
+                for key in keys:
+                    _assert_no_columnpath(key, ColumnPath(column_family, column=c.name))
+
+    def test_batch_mutate_remove_standard_row(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1', 'Standard2')
+
+        column_families = ['Standard1', 'Standard2']
+        keys = ['key_%d' % i for i in range(11, 21)]
+        _insert_multi(keys)
+
+        mutations = [Mutation(deletion=Deletion(20))]
+        mutation_map = dict((column_family, mutations) for column_family in column_families)
+
+        keyed_mutations = dict((key, mutation_map) for key in keys)
+
+        client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
+
+        for column_family in column_families:
+            for c in _SIMPLE_COLUMNS:
+                for key in keys:
+                    _assert_no_columnpath(key, ColumnPath(column_family, column=c.name))
+
+    def test_batch_mutate_remove_super_columns_with_standard_under(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Super1', 'Super2')
+
+        column_families = ['Super1', 'Super2']
+        keys = ['key_%d' % i for i in range(11, 21)]
+        for key in keys:
+            _insert_super(key)
+
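+        # For each super column, build a Deletion that names its subcolumns
+        # explicitly, then apply the deletions to every key in one
+        # batch_mutate call.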
+        mutations = []
+        for sc in _SUPER_COLUMNS:
+            names = [c.name for c in sc.columns]
+            mutations.append(Mutation(deletion=Deletion(20, super_column=sc.name, predicate=SlicePredicate(column_names=names))))
+
+        mutation_map = dict((column_family, mutations) for column_family in column_families)
+
+        keyed_mutations = dict((key, mutation_map) for key in keys)
+
+        client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
+        for column_family in column_families:
+            for sc in _SUPER_COLUMNS:
+                for c in sc.columns:
+                    for key in keys:
+                        _assert_no_columnpath(key, ColumnPath(column_family, super_column=sc.name, column=c.name))
+
+    def test_batch_mutate_remove_super_columns_with_none_given_underneath(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Super1')
+
+        keys = ['key_%d' % i for i in range(17, 21)]
+
+        for key in keys:
+            _insert_super(key)
+
+        mutations = []
+
+        for sc in _SUPER_COLUMNS:
+            mutations.append(Mutation(deletion=Deletion(20,
+                                                        super_column=sc.name)))
+
+        mutation_map = {'Super1': mutations}
+
+        keyed_mutations = dict((key, mutation_map) for key in keys)
+
+        # Sanity check
+        for sc in _SUPER_COLUMNS:
+            for key in keys:
+                _assert_columnpath_exists(key, ColumnPath('Super1', super_column=sc.name))
+
+        client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
+
+        for sc in _SUPER_COLUMNS:
+            for key in keys:
+                _assert_no_columnpath(key, ColumnPath('Super1', super_column=sc.name))
+
+    def test_batch_mutate_remove_super_columns_entire_row(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Super1')
+
+        keys = ['key_%d' % i for i in range(17, 21)]
+
+        for key in keys:
+            _insert_super(key)
+
+        mutations = [Mutation(deletion=Deletion(20))]
+
+        mutation_map = {'Super1': mutations}
+
+        keyed_mutations = dict((key, mutation_map) for key in keys)
+
+        # Sanity check
+        for sc in _SUPER_COLUMNS:
+            for key in keys:
+                _assert_columnpath_exists(key, ColumnPath('Super1', super_column=sc.name))
+
+        client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
+
+        for sc in _SUPER_COLUMNS:
+            for key in keys:
+                _assert_no_columnpath(key, ColumnPath('Super1', super_column=sc.name))
+
+    # known failure: see CASSANDRA-10046
+    def test_batch_mutate_remove_slice_standard(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1')
+
+        columns = [Column('c1', 'value1', 0),
+                   Column('c2', 'value2', 0),
+                   Column('c3', 'value3', 0),
+                   Column('c4', 'value4', 0),
+                   Column('c5', 'value5', 0)]
+
+        for column in columns:
+            client.insert('key', ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
+
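+        # A Deletion carrying a slice_range predicate should remove c2
+        # through c4 in a single mutation; this is the path flagged as a
+        # known failure above (CASSANDRA-10046).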
+        d = Deletion(1, predicate=SlicePredicate(slice_range=SliceRange(start='c2', finish='c4')))
+        client.batch_mutate({'key': {'Standard1': [Mutation(deletion=d)]}}, ConsistencyLevel.ONE)
+
+        _assert_columnpath_exists('key', ColumnPath('Standard1', column='c1'))
+        _assert_no_columnpath('key', ColumnPath('Standard1', column='c2'))
+        _assert_no_columnpath('key', ColumnPath('Standard1', column='c3'))
+        _assert_no_columnpath('key', ColumnPath('Standard1', column='c4'))
+        _assert_columnpath_exists('key', ColumnPath('Standard1', column='c5'))
+
+    # known failure: see CASSANDRA-10046
+    def test_batch_mutate_remove_slice_of_entire_supercolumns(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Super1')
+
+        columns = [SuperColumn(name='sc1', columns=[Column(_i64(1), 'value1', 0)]),
+                   SuperColumn(name='sc2',
+                               columns=[Column(_i64(2), 'value2', 0), Column(_i64(3), 'value3', 0)]),
+                   SuperColumn(name='sc3', columns=[Column(_i64(4), 'value4', 0)]),
+                   SuperColumn(name='sc4',
+                               columns=[Column(_i64(5), 'value5', 0), Column(_i64(6), 'value6', 0)]),
+                   SuperColumn(name='sc5', columns=[Column(_i64(7), 'value7', 0)])]
+
+        for column in columns:
+            for subcolumn in column.columns:
+                client.insert('key', ColumnParent('Super1', column.name), subcolumn, ConsistencyLevel.ONE)
+
+        d = Deletion(1, predicate=SlicePredicate(slice_range=SliceRange(start='sc2', finish='sc4')))
+        client.batch_mutate({'key': {'Super1': [Mutation(deletion=d)]}}, ConsistencyLevel.ONE)
+
+        _assert_columnpath_exists('key', ColumnPath('Super1', super_column='sc1', column=_i64(1)))
+        _assert_no_columnpath('key', ColumnPath('Super1', super_column='sc2', column=_i64(2)))
+        _assert_no_columnpath('key', ColumnPath('Super1', super_column='sc2', column=_i64(3)))
+        _assert_no_columnpath('key', ColumnPath('Super1', super_column='sc3', column=_i64(4)))
+        _assert_no_columnpath('key', ColumnPath('Super1', super_column='sc4', column=_i64(5)))
+        _assert_no_columnpath('key', ColumnPath('Super1', super_column='sc4', column=_i64(6)))
+        _assert_columnpath_exists('key', ColumnPath('Super1', super_column='sc5', column=_i64(7)))
+
+    @since('1.0', '2.2')
+    def test_batch_mutate_remove_slice_part_of_supercolumns(self):
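+        # Delete a slice of subcolumns (2..4) inside a single super column;
+        # the @since upper bound restricts this test to 2.2 and earlier,
+        # where that deletion path is still supported.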
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Super1')
+
+        columns = [Column(_i64(1), 'value1', 0),
+                   Column(_i64(2), 'value2', 0),
+                   Column(_i64(3), 'value3', 0),
+                   Column(_i64(4), 'value4', 0),
+                   Column(_i64(5), 'value5', 0)]
+
+        for column in columns:
+            client.insert('key', ColumnParent('Super1', 'sc1'), column, ConsistencyLevel.ONE)
+
+        r = SliceRange(start=_i64(2), finish=_i64(4))
+        d = Deletion(1, super_column='sc1', predicate=SlicePredicate(slice_range=r))
+        client.batch_mutate({'key': {'Super1': [Mutation(deletion=d)]}}, ConsistencyLevel.ONE)
+
+        _assert_columnpath_exists('key', ColumnPath('Super1', super_column='sc1', column=_i64(1)))
+        _assert_no_columnpath('key', ColumnPath('Super1', super_column='sc1', column=_i64(2)))
+        _assert_no_columnpath('key', ColumnPath('Super1', super_column='sc1', column=_i64(3)))
+        _assert_no_columnpath('key', ColumnPath('Super1', super_column='sc1', column=_i64(4)))
+        _assert_columnpath_exists('key', ColumnPath('Super1', super_column='sc1', column=_i64(5)))
+
+    def test_batch_mutate_insertions_and_deletions(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Super1', 'Super2')
+
+        first_insert = SuperColumn("sc1",
+                                   columns=[Column(_i64(20), 'value20', 3),
+                                            Column(_i64(21), 'value21', 3)])
+        second_insert = SuperColumn("sc1",
+                                    columns=[Column(_i64(20), 'value20', 3),
+                                             Column(_i64(21), 'value21', 3)])
+        first_deletion = {'super_column': "sc1",
+                          'predicate': SlicePredicate(column_names=[_i64(22), _i64(23)])}
+        second_deletion = {'super_column': "sc2",
+                           'predicate': SlicePredicate(column_names=[_i64(22), _i64(23)])}
+
+        keys = ['key_30', 'key_31']
+        for key in keys:
+            sc = SuperColumn('sc1', [Column(_i64(22), 'value22', 0),
+                                     Column(_i64(23), 'value23', 0)])
+            cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=sc))]}
+            client.batch_mutate({key: cfmap}, ConsistencyLevel.ONE)
+
+            sc2 = SuperColumn('sc2', [Column(_i64(22), 'value22', 0),
+                                      Column(_i64(23), 'value23', 0)])
+            cfmap2 = {'Super2': [Mutation(ColumnOrSuperColumn(super_column=sc2))]}
+            client.batch_mutate({key: cfmap2}, ConsistencyLevel.ONE)
+
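+        # Mix insertions and deletions for the same keys in one batch: the
+        # deletions at timestamps 3 and 2 shadow the seed columns written at
+        # timestamp 0, while the inserted columns 20 and 21 (timestamp 3)
+        # survive, as verified below.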
+        cfmap3 = {
+            'Super1': [Mutation(ColumnOrSuperColumn(super_column=first_insert)),
+                       Mutation(deletion=Deletion(3, **first_deletion))],
+
+            'Super2': [Mutation(deletion=Deletion(2, **second_deletion)),
+                       Mutation(ColumnOrSuperColumn(super_column=second_insert))]
+        }
+
+        keyed_mutations = dict((key, cfmap3) for key in keys)
+        client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
+
+        for key in keys:
+            for c in [_i64(22), _i64(23)]:
+                _assert_no_columnpath(key, ColumnPath('Super1', super_column='sc1', column=c))
+                _assert_no_columnpath(key, ColumnPath('Super2', super_column='sc2', column=c))
+
+            for c in [_i64(20), _i64(21)]:
+                _assert_columnpath_exists(key, ColumnPath('Super1', super_column='sc1', column=c))
+                _assert_columnpath_exists(key, ColumnPath('Super2', super_column='sc1', column=c))
+
+    def test_bad_system_calls(self):
+        def duplicate_index_names():
+            _set_keyspace('Keyspace1')
+            cd1 = ColumnDef('foo', 'BytesType', IndexType.KEYS, 'i')
+            cd2 = ColumnDef('bar', 'BytesType', IndexType.KEYS, 'i')
+            cf = CfDef('Keyspace1', 'BadCF', column_metadata=[cd1, cd2])
+            client.system_add_column_family(cf)
+        _expect_exception(duplicate_index_names, InvalidRequestException)
+
+    def test_bad_batch_calls(self):
+        # batch_mutate does not accept a ColumnOrSuperColumn and a Deletion in the same Mutation
+        def too_full():
+            _set_keyspace('Keyspace1')
+            col = ColumnOrSuperColumn(column=Column("foo", 'bar', 0))
+            dele = Deletion(2, predicate=SlicePredicate(column_names=['baz']))
+            client.batch_mutate({'key_34': {'Standard1': [Mutation(col, dele)]}},
+                                ConsistencyLevel.ONE)
+        _expect_exception(too_full, InvalidRequestException)
+
+        # batch_mutate does not accept a ColumnOrSuperColumn on an undefined column family
+        def bad_cf():
+            _set_keyspace('Keyspace1')
+            col = ColumnOrSuperColumn(column=Column("foo", 'bar', 0))
+            client.batch_mutate({'key_36': {'Undefined': [Mutation(col)]}},
+                                ConsistencyLevel.ONE)
+        _expect_exception(bad_cf, InvalidRequestException)
+
+        # batch_mutate does not accept a Deletion on an undefined column family
+        def bad_cf_2():
+            _set_keyspace('Keyspace1')
+            d = Deletion(2, predicate=SlicePredicate(column_names=['baz']))
+            client.batch_mutate({'key_37': {'Undefined': [Mutation(deletion=d)]}},
+                                ConsistencyLevel.ONE)
+        _expect_exception(bad_cf_2, InvalidRequestException)
+
+        # a column value that does not match the declared validator
+        def send_string_instead_of_long():
+            _set_keyspace('Keyspace1')
+            col = ColumnOrSuperColumn(column=Column('birthdate', 'bar', 0))
+            client.batch_mutate({'key_38': {'Indexed1': [Mutation(col)]}},
+                                ConsistencyLevel.ONE)
+        _expect_exception(send_string_instead_of_long, InvalidRequestException)
+
+    def test_column_name_lengths(self):
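+        # Column names must be between 1 and 2**16 - 1 bytes long,
+        # consistent with a 16-bit length prefix on serialized names; probe
+        # both boundaries and a few power-of-two edges in between.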
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1')
+
+        _expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), Column('', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
+        client.insert('key1', ColumnParent('Standard1'), Column('x' * 1, 'value', 0), ConsistencyLevel.ONE)
+        client.insert('key1', ColumnParent('Standard1'), Column('x' * 127, 'value', 0), ConsistencyLevel.ONE)
+        client.insert('key1', ColumnParent('Standard1'), Column('x' * 128, 'value', 0), ConsistencyLevel.ONE)
+        client.insert('key1', ColumnParent('Standard1'), Column('x' * 129, 'value', 0), ConsistencyLevel.ONE)
+        client.insert('key1', ColumnParent('Standard1'), Column('x' * 255, 'value', 0), ConsistencyLevel.ONE)
+        client.insert('key1', ColumnParent('Standard1'), Column('x' * 256, 'value', 0), ConsistencyLevel.ONE)
+        client.insert('key1', ColumnParent('Standard1'), Column('x' * 257, 'value', 0), ConsistencyLevel.ONE)
+        client.insert('key1', ColumnParent('Standard1'), Column('x' * (2 ** 16 - 1), 'value', 0), ConsistencyLevel.ONE)
+        _expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), Column('x' * (2 ** 16), 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
+
+    def test_bad_calls(self):
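+        # Each call below is malformed in a different way and must be
+        # rejected: InvalidRequestException for validation failures,
+        # TApplicationException for missing required arguments.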
+        _set_keyspace('Keyspace1')
+
+        # missing arguments
+        _expect_exception(lambda: client.insert(None, None, None, None), TApplicationException)
+        # supercolumn in a non-super CF
+        _expect_exception(lambda: client.insert('key1', ColumnParent('Standard1', 'x'), Column('y', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
+        # no supercolumn in a super CF
+        _expect_exception(lambda: client.insert('key1', ColumnParent('Super1'), Column('y', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
+        # column but no supercolumn in remove
+        _expect_exception(lambda: client.remove('key1', ColumnPath('Super1', column='x'), 0, ConsistencyLevel.ONE), InvalidRequestException)
+        # super column in non-super CF
+        _expect_exception(lambda: client.remove('key1', ColumnPath('Standard1', 'y', 'x'), 0, ConsistencyLevel.ONE), InvalidRequestException)
+        # key too long
+        _expect_exception(lambda: client.get('x' * 2 ** 16, ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE), InvalidRequestException)
+        # empty key
+        _expect_exception(lambda: client.get('', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE), InvalidRequestException)
+        cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c)) for c in _SUPER_COLUMNS],
+                 'Super2': [Mutation(ColumnOrSuperColumn(super_column=c)) for c in _SUPER_COLUMNS]}
+        _expect_exception(lambda: client.batch_mutate({'': cfmap}, ConsistencyLevel.ONE), InvalidRequestException)
+        # empty column name
+        _expect_exception(lambda: client.get('key1', ColumnPath('Standard1', column=''), ConsistencyLevel.ONE), InvalidRequestException)
+        # get doesn't specify column name
+        _expect_exception(lambda: client.get('key1', ColumnPath('Standard1'), ConsistencyLevel.ONE), InvalidRequestException)
+        # supercolumn in a non-super CF
+        _expect_exception(lambda: client.get('key1', ColumnPath('Standard1', 'x', 'y'), ConsistencyLevel.ONE), InvalidRequestException)
+        # get doesn't specify supercolumn name
+        _expect_exception(lambda: client.get('key1', ColumnPath('Super1'), ConsistencyLevel.ONE), InvalidRequestException)
+        # invalid CF
+        _expect_exception(lambda: get_range_slice(client, ColumnParent('S'), SlicePredicate(column_names=['', '']), '', '', 5, ConsistencyLevel.ONE), InvalidRequestException)
+        # 'x' is not a valid Long
+        _expect_exception(lambda: client.insert('key1', ColumnParent('Super1', 'sc1'), Column('x', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
+        # start is not a valid Long
+        p = SlicePredicate(slice_range=SliceRange('x', '', False, 1))
+        column_parent = ColumnParent('StandardLong1')
+        _expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
+                          InvalidRequestException)
+        # start > finish
+        p = SlicePredicate(slice_range=SliceRange(_i64(10), _i64(0), False, 1))
+        column_parent = ColumnParent('StandardLong1')
+        _expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
+                          InvalidRequestException)
+        # start is not a valid Long, supercolumn version
+        p = SlicePredicate(slice_range=SliceRange('x', '', False, 1))
+        column_parent = ColumnParent('Super1', 'sc1')
+        _expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
+                          InvalidRequestException)
+        # start > finish, supercolumn version
+        p = SlicePredicate(slice_range=SliceRange(_i64(10), _i64(0), False, 1))
+        column_parent = ColumnParent('Super1', 'sc1')
+        _expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
+                          InvalidRequestException)
+        # start > finish, key version
+        _expect_exception(lambda: get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['']), 'z', 'a', 1, ConsistencyLevel.ONE), InvalidRequestException)
+        # ttl must be greater than or equal to zero
+        column = Column('cttl1', 'value1', 0, -1)
+        _expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE),
+                          InvalidRequestException)
+        # don't allow super_column in a Deletion against a standard ColumnFamily
+        deletion = Deletion(1, 'supercolumn', None)
+        mutation = Mutation(deletion=deletion)
+        mutations = {'key': {'Standard1': [mutation]}}
+        _expect_exception(lambda: client.batch_mutate(mutations, ConsistencyLevel.QUORUM),
+                          InvalidRequestException)
+        # 'x' is not a valid long
+        deletion = Deletion(1, 'x', None)
+        mutation = Mutation(deletion=deletion)
+        mutations = {'key': {'Super3': [mutation]}}
+        _expect_exception(lambda: client.batch_mutate(mutations, ConsistencyLevel.QUORUM), InvalidRequestException)
+        # counters don't support ANY
+        _expect_exception(lambda: client.add('key1', ColumnParent('Counter1', 'x'), CounterColumn('y', 1), ConsistencyLevel.ANY), InvalidRequestException)
+
+    def test_batch_insert_super(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Super1', 'Super2')
+
+        cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c))
+                            for c in _SUPER_COLUMNS],
+                 'Super2': [Mutation(ColumnOrSuperColumn(super_column=c))
+                            for c in _SUPER_COLUMNS]}
+        client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
+        _verify_super('Super1')
+        _verify_super('Super2')
+
+    def test_cf_remove_column(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1')
+
+        _insert_simple()
+        client.remove('key1', ColumnPath('Standard1', column='c1'), 1, ConsistencyLevel.ONE)
+        _expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE))
+        assert client.get('key1', ColumnPath('Standard1', column='c2'), ConsistencyLevel.ONE).column \
+            == Column('c2', 'value2', 0)
+        assert _big_slice('key1', ColumnParent('Standard1')) \
+            == [ColumnOrSuperColumn(column=Column('c2', 'value2', 0))]
+
+        # New insert, make sure it shows up post-remove:
+        client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
+        columns = [result.column
+                   for result in _big_slice('key1', ColumnParent('Standard1'))]
+        assert columns == [Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
+
+        # Test resurrection.  First, re-insert the value w/ older timestamp,
+        # and make sure it stays removed
+        client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
+        columns = [result.column
+                   for result in _big_slice('key1', ColumnParent('Standard1'))]
+        assert columns == [Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
+        # Next, w/ a newer timestamp; it should come back:
+        client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 2), ConsistencyLevel.ONE)
+        columns = [result.column
+                   for result in _big_slice('key1', ColumnParent('Standard1'))]
+        assert columns == [Column('c1', 'value1', 2), Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
+
+    def test_cf_remove(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1', 'Super1')
+
+        _insert_simple()
+        _insert_super()
+
+        # Remove the key1:Standard1 cf; verify super is unaffected
+        client.remove('key1', ColumnPath('Standard1'), 3, ConsistencyLevel.ONE)
+        assert _big_slice('key1', ColumnParent('Standard1')) == []
+        _verify_super()
+
+        # Test resurrection.  First, re-insert a value w/ older timestamp,
+        # and make sure it stays removed:
+        client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
+        assert _big_slice('key1', ColumnParent('Standard1')) == []
+        # Next, w/ a newer timestamp; it should come back:
+        client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 4), ConsistencyLevel.ONE)
+        result = _big_slice('key1', ColumnParent('Standard1'))
+        assert result == [ColumnOrSuperColumn(column=Column('c1', 'value1', 4))], result
+
+        # check removing the entire super cf, too.
+        client.remove('key1', ColumnPath('Super1'), 3, ConsistencyLevel.ONE)
+        assert _big_slice('key1', ColumnParent('Super1')) == []
+        assert _big_slice('key1', ColumnParent('Super1', 'sc1')) == []
+
+    def test_super_cf_remove_and_range_slice(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Super1')
+
+        client.insert('key3', ColumnParent('Super1', 'sc1'), Column(_i64(1), 'v1', 0), ConsistencyLevel.ONE)
+        client.remove('key3', ColumnPath('Super1', 'sc1'), 5, ConsistencyLevel.ONE)
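+        # The removed row still shows up in range scans as an empty "range
+        # ghost" (a key with no columns) until compaction purges it.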
+
+        rows = {}
+        for row in get_range_slice(client, ColumnParent('Super1'), SlicePredicate(slice_range=SliceRange('', '', False, 1000)), '', '', 1000, ConsistencyLevel.ONE):
+            scs = [cosc.super_column for cosc in row.columns]
+            rows[row.key] = scs
+        assert rows == {'key3': []}, rows
+
+    def test_super_cf_remove_column(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1', 'Super1')
+
+        _insert_simple()
+        _insert_super()
+
+        # Make sure remove clears out what it's supposed to, and _only_ that:
+        client.remove('key1', ColumnPath('Super1', 'sc2', _i64(5)), 5, ConsistencyLevel.ONE)
+        _expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc2', _i64(5)), ConsistencyLevel.ONE))
+        super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
+        assert super_columns == [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
+                                 SuperColumn(name='sc2', columns=[Column(_i64(6), 'value6', 0)])]
+        _verify_simple()
+
+        # New insert, make sure it shows up post-remove:
+        client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(7), 'value7', 0), ConsistencyLevel.ONE)
+        super_columns_expected = [SuperColumn(name='sc1',
+                                              columns=[Column(_i64(4), 'value4', 0)]),
+                                  SuperColumn(name='sc2',
+                                              columns=[Column(_i64(6), 'value6', 0), Column(_i64(7), 'value7', 0)])]
+
+        super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
+        assert super_columns == super_columns_expected, super_columns
+
+        # Test resurrection.  First, re-insert the value w/ older timestamp,
+        # and make sure it stays removed:
+        client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
+
+        super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
+        assert super_columns == super_columns_expected, super_columns
+
+        # Next, w/ a newer timestamp; it should come back
+        client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 6), ConsistencyLevel.ONE)
+        super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
+        super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
+                                  SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 6),
+                                                                   Column(_i64(6), 'value6', 0),
+                                                                   Column(_i64(7), 'value7', 0)])]
+        assert super_columns == super_columns_expected, super_columns
+
+        # shouldn't be able to specify a column w/o a super column for remove
+        cp = ColumnPath(column_family='Super1', column='sc2')
+        e = _expect_exception(lambda: client.remove('key1', cp, 5, ConsistencyLevel.ONE), InvalidRequestException)
+        assert e.why.find("column cannot be specified without") >= 0
+
+    def test_super_cf_remove_supercolumn(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1', 'Super1')
+
+        _insert_simple()
+        _insert_super()
+
+        # Make sure remove clears out what it's supposed to, and _only_ that:
+        client.remove('key1', ColumnPath('Super1', 'sc2'), 5, ConsistencyLevel.ONE)
+        _expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc2', _i64(5)), ConsistencyLevel.ONE))
+        super_columns = _big_slice('key1', ColumnParent('Super1', 'sc2'))
+        assert super_columns == [], super_columns
+        super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)])]
+        super_columns = [result.super_column
+                         for result in _big_slice('key1', ColumnParent('Super1'))]
+        assert super_columns == super_columns_expected, super_columns
+        _verify_simple()
+
+        # Test resurrection.  First, re-insert the value w/ older timestamp,
+        # and make sure it stays removed:
+        client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 1), ConsistencyLevel.ONE)
+        super_columns = [result.super_column
+                         for result in _big_slice('key1', ColumnParent('Super1'))]
+        assert super_columns == super_columns_expected, super_columns
+
+        # Next, w/ a newer timestamp; it should come back
+        client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 6), ConsistencyLevel.ONE)
+        super_columns = [result.super_column
+                         for result in _big_slice('key1', ColumnParent('Super1'))]
+        super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
+                                  SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 6)])]
+        assert super_columns == super_columns_expected, super_columns
+
+        # check slicing at the subcolumn level too
+        p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
+        columns = [result.column
+                   for result in client.get_slice('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE)]
+        assert columns == [Column(_i64(5), 'value5', 6)], columns
+
+    def test_super_cf_resurrect_subcolumn(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Super1')
+
+        key = 'vijay'
+        client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
+
+        client.remove(key, ColumnPath('Super1', 'sc1'), 1, ConsistencyLevel.ONE)
+
+        client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 2), ConsistencyLevel.ONE)
+
+        result = client.get(key, ColumnPath('Super1', 'sc1'), ConsistencyLevel.ONE)
+        assert result.super_column.columns is not None, result.super_column
+
+    def test_empty_range(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1', 'Super1')
+
+        assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE) == []
+        _insert_simple()
+        assert get_range_slice(client, ColumnParent('Super1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE) == []
+
+    @since('2.1')
+    def test_super_cql_read_compatibility(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Super1')
+
+        _insert_super("key1")
+        _insert_super("key2")
+
+        node1 = self.cluster.nodelist()[0]
+        session = self.patient_cql_connection(node1)
+
+        session.execute('USE "Keyspace1"')
+
+        assert_all(session, "SELECT * FROM \"Super1\"",
+                   [["key1", "sc1", 4, "value4"],
+                    ["key1", "sc2", 5, "value5"],
+                    ["key1", "sc2", 6, "value6"],
+                    ["key2", "sc1", 4, "value4"],
+                    ["key2", "sc2", 5, "value5"],
+                    ["key2", "sc2", 6, "value6"]])
+
+        assert_all(session, "SELECT * FROM \"Super1\" WHERE key=textAsBlob('key1')",
+                   [["key1", "sc1", 4, "value4"],
+                    ["key1", "sc2", 5, "value5"],
+                    ["key1", "sc2", 6, "value6"]])
+
+        assert_all(session, "SELECT * FROM \"Super1\" WHERE key=textAsBlob('key1') AND column1=textAsBlob('sc2')",
+                   [["key1", "sc2", 5, "value5"],
+                    ["key1", "sc2", 6, "value6"]])
+
+        assert_all(session, "SELECT * FROM \"Super1\" WHERE key=textAsBlob('key1') AND column1=textAsBlob('sc2') AND column2 = 5",
+                   [["key1", "sc2", 5, "value5"]])
+
+        assert_all(session, "SELECT * FROM \"Super1\" WHERE key = textAsBlob('key1') AND column1 = textAsBlob('sc2')",
+                   [["key1", "sc2", 5, "value5"],
+                    ["key1", "sc2", 6, "value6"]])
+
+        assert_all(session, "SELECT column2, value FROM \"Super1\" WHERE key = textAsBlob('key1') AND column1 = textAsBlob('sc2')",
+                   [[5, "value5"],
+                    [6, "value6"]])
+
+    @since('2.1')
+    def test_super_cql_write_compatibility(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Super1')
+
+        node1 = self.cluster.nodelist()[0]
+        session = self.patient_cql_connection(node1)
+
+        session.execute('USE "Keyspace1"')
+
+        query = "INSERT INTO \"Super1\" (key, column1, column2, value) VALUES (textAsBlob(%s), textAsBlob(%s), %s, textAsBlob(%s)) USING TIMESTAMP 1234"
+        session.execute(query, ("key1", "sc1", 4, "value4"))
+        session.execute(query, ("key1", "sc2", 5, "value5"))
+        session.execute(query, ("key1", "sc2", 6, "value6"))
+        session.execute(query, ("key2", "sc1", 4, "value4"))
+        session.execute(query, ("key2", "sc2", 5, "value5"))
+        session.execute(query, ("key2", "sc2", 6, "value6"))
+
+        p = SlicePredicate(slice_range=SliceRange('sc1', 'sc2', False, 2))
+        result = client.get_slice('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE)
+        assert_length_equal(result, 2)
+        assert result[0].super_column.name == 'sc1'
+        assert result[0].super_column.columns[0] == Column(_i64(4), 'value4', 1234)
+        assert result[1].super_column.name == 'sc2'
+        assert result[1].super_column.columns == [Column(_i64(5), 'value5', 1234), Column(_i64(6), 'value6', 1234)]
+
+    def test_range_with_remove(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1')
+
+        _insert_simple()
+        assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), 'key1', '', 1000, ConsistencyLevel.ONE)[0].key == 'key1'
+
+        client.remove('key1', ColumnPath('Standard1', column='c1'), 1, ConsistencyLevel.ONE)
+        client.remove('key1', ColumnPath('Standard1', column='c2'), 1, ConsistencyLevel.ONE)
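+        # Even with every column deleted, the row is still returned by the
+        # range scan as an empty KeySlice (a range ghost).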
+        actual = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c2']), '', '', 1000, ConsistencyLevel.ONE)
+        assert actual == [KeySlice(columns=[], key='key1')], actual
+
+    def test_range_with_remove_cf(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1')
+
+        _insert_simple()
+        assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), 'key1', '', 1000, ConsistencyLevel.ONE)[0].key == 'key1'
+
+        client.remove('key1', ColumnPath('Standard1'), 1, ConsistencyLevel.ONE)
+        actual = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE)
+        assert actual == [KeySlice(columns=[], key='key1')], actual
+
+    def test_range_collation(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1')
+
+        for key in ['-a', '-b', 'a', 'b'] + [str(i) for i in range(100)]:
+            client.insert(key, ColumnParent('Standard1'), Column(key, 'v', 0), ConsistencyLevel.ONE)
+
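+        # Under the ByteOrderedPartitioner configured for these tests, rows
+        # collate by the raw bytes of their keys, so '-a' sorts before '0'
+        # and '10' before '2'; L below is that lexical order.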
+        slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '', '', 1000, ConsistencyLevel.ONE)
+        L = ['-a', '-b', '0', '1', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '2', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '3', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '4', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '5', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '6', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '7', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '8', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '9', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', 'a', 'b']
+        assert len(slices) == len(L)
+        for key, ks in zip(L, slices):
+            assert key == ks.key
+
+    def test_range_partial(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1')
+
+        for key in ['-a', '-b', 'a', 'b'] + [str(i) for i in range(100)]:
+            client.insert(key, ColumnParent('Standard1'), Column(key, 'v', 0), ConsistencyLevel.ONE)
+
+        def check_slices_against_keys(keyList, sliceList):
+            assert len(keyList) == len(sliceList), "%d vs %d" % (len(keyList), len(sliceList))
+            for key, ks in zip(keyList, sliceList):
+                assert key == ks.key
+
+        slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), 'a', '', 1000, ConsistencyLevel.ONE)
+        check_slices_against_keys(['a', 'b'], slices)
+
+        slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '', '15', 1000, ConsistencyLevel.ONE)
+        check_slices_against_keys(['-a', '-b', '0', '1', '10', '11', '12', '13', '14', '15'], slices)
+
+        slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '50', '51', 1000, ConsistencyLevel.ONE)
+        check_slices_against_keys(['50', '51'], slices)
+
+        slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '1', '', 10, ConsistencyLevel.ONE)
+        check_slices_against_keys(['1', '10', '11', '12', '13', '14', '15', '16', '17', '18'], slices)
+
+    def test_get_slice_range(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1')
+
+        _insert_range()
+        _verify_range()
+
+    def test_get_slice_super_range(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Super1')
+
+        _insert_super_range()
+        _verify_super_range()
+
+    def test_get_range_slices_tokens(self):
+        _set_keyspace('Keyspace2')
+        self.truncate_all('Super3')
+
+        for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
+            for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
+                client.insert(key, ColumnParent('Super3', 'sc1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
+
+        cp = ColumnParent('Super3', 'sc1')
+        predicate = SlicePredicate(column_names=['col1', 'col3'])
+        key_range = KeyRange(start_token='55', end_token='55', count=100)
+        result = client.get_range_slices(cp, predicate, key_range, ConsistencyLevel.ONE)
+        assert len(result) == 5
+        assert result[0].columns[0].column.name == 'col1'
+        assert result[0].columns[1].column.name == 'col3'
+
+    def test_get_range_slice_super(self):
+        _set_keyspace('Keyspace2')
+        self.truncate_all('Super3')
+
+        for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
+            for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
+                client.insert(key, ColumnParent('Super3', 'sc1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
+
+        cp = ColumnParent('Super3', 'sc1')
+        result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
+        assert len(result) == 3
+        assert result[0].columns[0].column.name == 'col1'
+        assert result[0].columns[1].column.name == 'col3'
+
+        cp = ColumnParent('Super3')
+        result = get_range_slice(client, cp, SlicePredicate(column_names=['sc1']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
+        assert len(result) == 3
+        assert all(row.columns[0].super_column.name == 'sc1' for row in result)
+
+    def test_get_range_slice(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1')
+
+        for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
+            for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
+                client.insert(key, ColumnParent('Standard1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
+        cp = ColumnParent('Standard1')
+
+        # test empty slice
+        result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key6', '', 1, ConsistencyLevel.ONE)
+        assert len(result) == 0
+
+        # test empty columns
+        result = get_range_slice(client, cp, SlicePredicate(column_names=['a']), 'key2', '', 1, ConsistencyLevel.ONE)
+        assert len(result) == 1
+        assert len(result[0].columns) == 0
+
+        # test column_names predicate
+        result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
+        assert len(result) == 3, result
+        assert result[0].columns[0].column.name == 'col1'
+        assert result[0].columns[1].column.name == 'col3'
+
+        # row limiting via count.
+        result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 1, ConsistencyLevel.ONE)
+        assert len(result) == 1
+
+        # test column slice predicate
+        result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=5)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
+        assert len(result) == 2
+        assert result[0].key == 'key1'
+        assert result[1].key == 'key2'
+        assert len(result[0].columns) == 3
+        assert result[0].columns[0].column.name == 'col2'
+        assert result[0].columns[2].column.name == 'col4'
+
+        # col limiting via count
+        result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=2)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
+        assert len(result[0].columns) == 2
+
+        # and reversed
+        result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col4', finish='col2', reversed=True, count=5)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
+        assert result[0].columns[0].column.name == 'col4'
+        assert result[0].columns[2].column.name == 'col2'
+
+        # row limiting via count
+        result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=5)), 'key1', 'key2', 1, ConsistencyLevel.ONE)
+        assert len(result) == 1
+
+        # removed data
+        client.remove('key1', ColumnPath('Standard1', column='col1'), 1, ConsistencyLevel.ONE)
+        result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange('', '')), 'key1', 'key2', 5, ConsistencyLevel.ONE)
+        assert len(result) == 2, result
+        assert result[0].columns[0].column.name == 'col2', result[0].columns[0].column.name
+        assert result[1].columns[0].column.name == 'col1'
+
+    def test_wrapped_range_slices(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1')
+
+        def copp_token(key):
+            # I cheated and generated this from Java
+            return {'a': '00530000000100000001',
+                    'b': '00540000000100000001',
+                    'c': '00550000000100000001',
+                    'd': '00560000000100000001',
+                    'e': '00580000000100000001'}[key]
+
+        for key in ['a', 'b', 'c', 'd', 'e']:
+            for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
+                client.insert(key, ColumnParent('Standard1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
+        cp = ColumnParent('Standard1')
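+        # A KeyRange whose start_token equals its end_token denotes the
+        # entire (wrapped) ring, so all five keys come back in token order
+        # regardless of where the wrap point falls.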
+
+        result = client.get_range_slices(cp, SlicePredicate(column_names=['col1', 'col3']), KeyRange(start_token=copp_token('e'), end_token=copp_token('e')), ConsistencyLevel.ONE)
+        assert [row.key for row in result] == ['a', 'b', 'c', 'd', 'e', ], [row.key for row in result]
+
+        result = client.get_range_slices(cp, SlicePredicate(column_names=['col1', 'col3']), KeyRange(start_token=copp_token('c'), end_token=copp_token('c')), ConsistencyLevel.ONE)
+        assert [row.key for row in result] == ['a', 'b', 'c', 'd', 'e', ], [row.key for row in result]
+
+    def test_get_slice_by_names(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1', 'Super1')
+
+        _insert_range()
+        p = SlicePredicate(column_names=['c1', 'c2'])
+        result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
+        assert len(result) == 2
+        assert result[0].column.name == 'c1'
+        assert result[1].column.name == 'c2'
+
+        _insert_super()
+        p = SlicePredicate(column_names=[_i64(4)])
+        result = client.get_slice('key1', ColumnParent('Super1', 'sc1'), p, ConsistencyLevel.ONE)
+        assert len(result) == 1
+        assert result[0].column.name == _i64(4)
+
+    def test_multiget_slice_with_compact_table(self):
+        """Insert multiple keys in a compact table and retrieve them using the multiget_slice interface"""
+        _set_keyspace('Keyspace1')
+
+        # create
+        cd = ColumnDef('v', 'AsciiType', None, None)
+        newcf = CfDef('Keyspace1', 'CompactColumnFamily', default_validation_class='AsciiType', column_metadata=[cd])
+        client.system_add_column_family(newcf)
+
+        CL = ConsistencyLevel.ONE
+        for i in range(0, 5):
+            client.insert('key' + str(i), ColumnParent('CompactColumnFamily'), Column('v', 'value' + str(i), 0), CL)
+        time.sleep(0.1)
+
+        p = SlicePredicate(column_names=['v'])
+        rows = client.multiget_slice(['key' + str(i) for i in range(0, 5)], ColumnParent('CompactColumnFamily'), p, ConsistencyLevel.ONE)
+
+        for i in range(0, 5):
+            key = 'key' + str(i)
+            assert key in rows
+            assert len(rows[key]) == 1
+            assert rows[key][0].column.name == 'v'
+            assert rows[key][0].column.value == 'value' + str(i)
+
+    def test_multiget_slice(self):
+        """Insert multiple keys and retrieve them using the multiget_slice interface"""
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1')
+
+        # Generate a list of 10 keys and insert them
+        num_keys = 10
+        keys = ['key' + str(i) for i in range(1, num_keys + 1)]
+        _insert_multi(keys)
+
+        # Retrieve all 10 key slices
+        rows = _big_multislice(keys, ColumnParent('Standard1'))
+
+        columns = [ColumnOrSuperColumn(c) for c in _SIMPLE_COLUMNS]
+        # Validate if the returned rows have the keys requested and if the ColumnOrSuperColumn is what was inserted
+        for key in keys:
+            assert key in rows
+            assert columns == rows[key]
+
+    def test_multi_count(self):
+        """Insert multiple keys and count them using the multiget interface"""
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Standard1')
+
+        # Generate a list of 10 keys containing 1 to 10 columns and insert them
+        num_keys = 10
+        for i in range(1, num_keys + 1):
+            key = 'key' + str(i)
+            for j in range(1, i + 1):
+                client.insert(key, ColumnParent('Standard1'), Column('c' + str(j), 'value' + str(j), 0), ConsistencyLevel.ONE)
+
+        # Count columns in all 10 keys
+        keys = ['key' + str(i) for i in range(1, num_keys + 1)]
+        p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
+        counts = client.multiget_count(keys, ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
+
+        # Check the returned counts
+        for i in range(1, num_keys + 1):
+            key = 'key' + str(i)
+            assert counts[key] == i
+
+    def test_batch_mutate_super_deletion(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Super1')
+
+        _insert_super('test')
+        d = Deletion(1, predicate=SlicePredicate(column_names=['sc1']))
+        cfmap = {'Super1': [Mutation(deletion=d)]}
+        client.batch_mutate({'test': cfmap}, ConsistencyLevel.ONE)
+        _expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc1'), ConsistencyLevel.ONE))
+
+    def test_super_reinsert(self):
+        _set_keyspace('Keyspace1')
+        self.truncate_all('Super1')
+
+        for x in range(3):
+            client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(x), 'value', 1), ConsistencyLevel.ONE)
+
+        client.remove('key1', ColumnPath('Super1'), 2, ConsistencyLevel.ONE)
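+        # The row-level tombstone at timestamp 2 shadows the timestamp-1
+        # columns; the timestamp-3 re-inserts below supersede it, leaving
+        # exactly three live columns for the slices to count.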
+
+        for x in range(3):
+            client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(x + 3), 'value', 3), ConsistencyLevel.ONE)
+
+        for n in range(1, 4):
+            p = SlicePredicate(slice_range=SliceRange('', '', False, n))
+            slice = client.get_slice('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE)
+            assert len(slice) == n, "expected %s results; found %s" % (n, slice)
+
+    def test_describe_keyspace(self):
+        try:
+            client.system_drop_keyspace("ValidKsForUpdate")
+        except InvalidRequestException:
+            pass  # The keyspace doesn't exist because this test was run in isolation.
+
+        kspaces = client.describe_keyspaces()
+        if self.cluster.version() >= '3.0':
+            assert len(kspaces) == 7, [x.name for x in kspaces]  # ['Keyspace2', 'Keyspace1', 'system', 'system_traces', 'system_auth', 'system_distributed', 'system_schema']
+        elif self.cluster.version() >= '2.2':
+            assert len(kspaces) == 6, [x.name for x in kspaces]  # ['Keyspace2', 'Keyspace1', 'system', 'system_traces', 'system_auth', 'system_distributed']
+        else:
+            assert len(kspaces) == 4, [x.name for x in kspaces]  # ['Keyspace2', 'Keyspace1', 'system', 'system_traces']
+
+        sysks = client.describe_keyspace("system")
+        assert sysks in kspaces
+
+        ks1 = client.describe_keyspace("Keyspace1")
+        assert ks1.strategy_options['replication_factor'] == '1', ks1.strategy_options
+        cf0 = next(cf for cf in ks1.cf_defs if cf.name == "Standard1")
+        assert cf0.comparator_type == "org.apache.cassandra.db.marshal.BytesType"
+
+    def test_describe(self):
+        assert client.describe_cluster_name() == 'test'
+
+    def test_describe_ring(self):
+        assert list(client.describe_ring('Keyspace1'))[0].endpoints == ['127.0.0.1']
+
+    def test_describe_token_map(self):
+        # test/conf/cassandra.yaml specifies org.apache.cassandra.dht.ByteOrderedPartitioner
+        # which uses BytesToken, so this just tests that the string representation of the token
+        # matches a regex pattern for BytesToken.toString().
+        ring = list(client.describe_token_map().items())
+        if not self.dtest_config.use_vnodes:
+            assert len(ring) == 1
+        else:
+            assert len(ring) == int(self.dtest_config.num_tokens)
+        token, node = ring[0]
+        if self.dtest_config.use_vnodes:
+            assert re.match("[0-9A-Fa-f]{32}", token)
+        assert node == '127.0.0.1'
+
+    def test_describe_partitioner(self):
+        # Make sure this just reads back the values from the config.
+        assert client.describe_partitioner() == "org.apache.cassandra.dht.ByteOrderedPartitioner"
+
+    def test_describe_snitch(self):
+        assert client.describe_snitch() == "org.apache.cassandra.locator.SimpleSnitch"
+
+    def test_invalid_ks_names(self):
+        def invalid_keyspace():
+            client.system_add_keyspace(KsDef('in-valid', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'}, cf_defs=[]))
+        _expect_exception(invalid_keyspace, InvalidRequestException)
+
+    def test_invalid_strategy_class(self):
+        def add_invalid_keyspace():
+            client.system_add_keyspace(KsDef('ValidKs', 'InvalidStrategyClass', {}, cf_defs=[]))
+        exc = _expect_exception(add_invalid_keyspace, InvalidRequestException)
+        s = str(exc)
+        assert s.find("InvalidStrategyClass") > -1, s
+        assert s.find("Unable to find replication strategy") > -1, s
+
+        def update_invalid_keyspace():
+            client.system_add_keyspace(KsDef('ValidKsForUpdate', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'}, cf_defs=[]))
+            client.system_update_keyspace(KsDef('ValidKsForUpdate', 'InvalidStrategyClass', {}, cf_defs=[]))
+
+        exc = _expect_exception(update_invalid_keyspace, InvalidRequestException)
+        s = str(exc)
+        assert s.find("InvalidStrategyClass") > -1, s
+        assert s.find("Unable to find replication strategy") > -1, s
+
+    def test_invalid_cf_names(self):
+        def invalid_cf():
+            _set_keyspace('Keyspace1')
+            newcf = CfDef('Keyspace1', 'in-valid')
+            client.system_add_column_family(newcf)
+        _expect_exception(invalid_cf, InvalidRequestException)
+
+        def invalid_cf_inside_new_ks():
+            cf = CfDef('ValidKsName_invalid_cf', 'in-valid')
+            _set_keyspace('system')
+            client.system_add_keyspace(KsDef('ValidKsName_invalid_cf', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'}, cf_defs=[cf]))
+        _expect_exception(invalid_cf_inside_new_ks, InvalidRequestException)
+
+    def test_system_cf_recreate(self):
+        "ensures that keyspaces and column familes can be dropped and recreated in short order"
+        for x in range(2):
+
+            keyspace = 'test_cf_recreate'
+            cf_name = 'recreate_cf'
+
+            # create
+            newcf = CfDef(keyspace, cf_name)
+            newks = KsDef(keyspace, 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'}, cf_defs=[newcf])
+            client.system_add_keyspace(newks)
+            _set_keyspace(keyspace)
+
+            # insert
+            client.insert('key0', ColumnParent(cf_name), Column('colA', 'colA-value', 0), ConsistencyLevel.ONE)
+            col1 = client.get_slice('key0', ColumnParent(cf_name), SlicePredicate(slice_range=SliceRange('', '', False, 100)), ConsistencyLevel.ONE)[0].column
+            assert col1.name == 'colA' and col1.value == 'colA-value'
+
+            # drop
+            client.system_drop_column_family(cf_name)
+
+            # recreate
+            client.system_add_column_family(newcf)
+
+            # query
+            cosc_list = client.get_slice('key0', ColumnParent(cf_name), SlicePredicate(slice_range=SliceRange('', '', False, 100)), ConsistencyLevel.ONE)
+            # this was failing prior to CASSANDRA-1477.
+            assert len(cosc_list) == 0, 'cosc length test failed'
+
+            client.system_drop_keyspace(keyspace)
+
+    def test_system_keyspace_operations(self):
+        # create; note the replication factor (10) exceeds the cluster size, which is still accepted
+        keyspace = KsDef('CreateKeyspace',
+                         'org.apache.cassandra.locator.SimpleStrategy',
+                         {'replication_factor': '10'},
+                         cf_defs=[CfDef('CreateKeyspace', 'CreateKsCf')])
+        client.system_add_keyspace(keyspace)
+        newks = client.describe_keyspace('CreateKeyspace')
+        assert 'CreateKsCf' in [x.name for x in newks.cf_defs]
+
+        _set_keyspace('CreateKeyspace')
+
+        # modify valid
+        modified_keyspace = KsDef('CreateKeyspace',
+                                  'org.apache.cassandra.locator.OldNetworkTopologyStrategy',
+                                  {'replication_factor': '1'},
+                                  cf_defs=[])
+        client.system_update_keyspace(modified_keyspace)
+        modks = client.describe_keyspace('CreateKeyspace')
+        assert modks.strategy_class == modified_keyspace.strategy_class
+        assert modks.strategy_options == modified_keyspace.strategy_options
+
+        # check strategy options are validated on modify
+        def modify_invalid_ks():
+            client.system_update_keyspace(KsDef('CreateKeyspace',
+                                                'org.apache.cassandra.locator.SimpleStrategy',
+                                                {},
+                                                cf_defs=[]))
+        _expect_exception(modify_invalid_ks, InvalidRequestException)
+
+        # drop
+        client.system_drop_keyspace('CreateKeyspace')
+
+        def get_second_ks():
+            client.describe_keyspace('CreateKeyspace')
+        _expect_exception(get_second_ks, NotFoundException)
+
+        # check strategy options are validated on creation
+        def create_invalid_ks():
+            client.system_add_keyspace(KsDef('InvalidKeyspace',
+                                             'org.apache.cassandra.locator.SimpleStrategy',
+                                             {},
+                                             cf_defs=[]))
+        _expect_exception(create_invalid_ks, InvalidRequestException)
+
+    def test_create_then_drop_ks(self):
+        keyspace = KsDef('AddThenDrop',
+                         strategy_class='org.apache.cassandra.locator.SimpleStrategy',
+                         strategy_options={'replication_factor': '1'},
+                         cf_defs=[])
+
+        def test_existence():
+            client.describe_keyspace(keyspace.name)
+        _expect_exception(test_existence, NotFoundException)
+        client.set_keyspace('system')
+        client.system_add_keyspace(keyspace)
+        test_existence()
+        client.system_drop_keyspace(keyspace.name)
+
+    def test_column_validators(self):
+        # columndef validation for regular CF
+        ks = 'Keyspace1'
+        _set_keyspace(ks)
+        cd = ColumnDef('col', 'LongType', None, None)
+        cf = CfDef('Keyspace1', 'ValidatorColumnFamily', column_metadata=[cd])
+        client.system_add_column_family(cf)
+        ks_def = client.describe_keyspace(ks)
+        assert 'ValidatorColumnFamily' in [x.name for x in ks_def.cf_defs]
+
+        cp = ColumnParent('ValidatorColumnFamily')
+        col0 = Column('col', _i64(42), 0)
+        col1 = Column('col', "ceci n'est pas 64bit", 0)
+        client.insert('key0', cp, col0, ConsistencyLevel.ONE)
+        e = _expect_exception(lambda: client.insert('key1', cp, col1, ConsistencyLevel.ONE), InvalidRequestException)
+        assert e.why.find("failed validation") >= 0
+
+        # columndef validation for super CF
+        scf = CfDef('Keyspace1', 'ValidatorSuperColumnFamily', column_type='Super', column_metadata=[cd])
+        client.system_add_column_family(scf)
+        ks_def = client.describe_keyspace(ks)
+        assert 'ValidatorSuperColumnFamily' in [x.name for x in ks_def.cf_defs]
+
+        scp = ColumnParent('ValidatorSuperColumnFamily', 'sc1')
+        client.insert('key0', scp, col0, ConsistencyLevel.ONE)
+        e = _expect_exception(lambda: client.insert('key1', scp, col1, ConsistencyLevel.ONE), InvalidRequestException)
+        assert e.why.find("failed validation") >= 0
+
+        # columndef and cfdef default validation
+        cf = CfDef('Keyspace1', 'DefaultValidatorColumnFamily', column_metadata=[cd], default_validation_class='UTF8Type')
+        client.system_add_column_family(cf)
+        ks_def = client.describe_keyspace(ks)
+        assert 'DefaultValidatorColumnFamily' in [x.name for x in ks_def.cf_defs]
+
+        dcp = ColumnParent('DefaultValidatorColumnFamily')
+        # inserting a longtype into column 'col' is valid at the columndef level
+        client.insert('key0', dcp, col0, ConsistencyLevel.ONE)
+        # inserting a UTF8type into column

<TRUNCATED>

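For readers following the thrift hunks above: _expect_exception and
_set_keyspace are module-level helpers defined near the top of the same
file, outside the hunks shown here. A minimal sketch of their usual shape,
assuming the migrated module keeps the historical behavior of returning the
caught exception so callers can inspect fields such as e.why:

    def _set_keyspace(keyspace):
        # point the shared thrift client at the given keyspace
        client.set_keyspace(keyspace)

    def _expect_exception(fn, type_):
        # run fn and require that it raises type_; hand the exception
        # back so callers can inspect it
        try:
            fn()
        except type_ as t:
            return t
        else:
            assert False, "expected %s to be raised" % type_.__name__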

[17/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/sstable_generation_loading_test.py
----------------------------------------------------------------------
diff --git a/sstable_generation_loading_test.py b/sstable_generation_loading_test.py
index 335f384..ab99a03 100644
--- a/sstable_generation_loading_test.py
+++ b/sstable_generation_loading_test.py
@@ -1,13 +1,17 @@
 import os
 import subprocess
 import time
-from distutils import dir_util
+import distutils.dir_util
+import pytest
+import logging
 
 from ccmlib import common as ccmcommon
 
-from dtest import Tester, debug, create_ks, create_cf
+from dtest import Tester, create_ks, create_cf
 from tools.assertions import assert_all, assert_none, assert_one
-from tools.decorators import since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 # WARNING: sstableloader tests should be added to TestSSTableGenerationAndLoading (below),
@@ -16,12 +20,15 @@ from tools.decorators import since
 
 # Also used by upgrade_tests/storage_engine_upgrade_test
 # to test loading legacy sstables
-class BaseSStableLoaderTest(Tester):
-    __test__ = False
+class TestBaseSStableLoader(Tester):
+
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        fixture_dtest_setup.allow_log_errors = True
+
     upgrade_from = None
     compact = False
     jvm_args = ()
-    allow_log_errors = True
 
     def create_schema(self, session, ks, compression):
         create_ks(session, ks, rf=2)
@@ -29,34 +35,34 @@ class BaseSStableLoaderTest(Tester):
         create_cf(session, "counter1", compression=compression, columns={'v': 'counter'},
                   compact_storage=self.compact)
 
-    def sstableloader_compression_none_to_none_test(self):
+    def test_sstableloader_compression_none_to_none(self):
         self.load_sstable_with_configuration(None, None)
 
-    def sstableloader_compression_none_to_snappy_test(self):
+    def test_sstableloader_compression_none_to_snappy(self):
         self.load_sstable_with_configuration(None, 'Snappy')
 
-    def sstableloader_compression_none_to_deflate_test(self):
+    def test_sstableloader_compression_none_to_deflate(self):
         self.load_sstable_with_configuration(None, 'Deflate')
 
-    def sstableloader_compression_snappy_to_none_test(self):
+    def test_sstableloader_compression_snappy_to_none(self):
         self.load_sstable_with_configuration('Snappy', None)
 
-    def sstableloader_compression_snappy_to_snappy_test(self):
+    def test_sstableloader_compression_snappy_to_snappy(self):
         self.load_sstable_with_configuration('Snappy', 'Snappy')
 
-    def sstableloader_compression_snappy_to_deflate_test(self):
+    def test_sstableloader_compression_snappy_to_deflate(self):
         self.load_sstable_with_configuration('Snappy', 'Deflate')
 
-    def sstableloader_compression_deflate_to_none_test(self):
+    def test_sstableloader_compression_deflate_to_none(self):
         self.load_sstable_with_configuration('Deflate', None)
 
-    def sstableloader_compression_deflate_to_snappy_test(self):
+    def test_sstableloader_compression_deflate_to_snappy(self):
         self.load_sstable_with_configuration('Deflate', 'Snappy')
 
-    def sstableloader_compression_deflate_to_deflate_test(self):
+    def test_sstableloader_compression_deflate_to_deflate(self):
         self.load_sstable_with_configuration('Deflate', 'Deflate')
 
-    def sstableloader_with_mv_test(self):
+    def test_sstableloader_with_mv(self):
         """
         @jira_ticket CASSANDRA-11275
         """
@@ -70,32 +76,33 @@ class BaseSStableLoaderTest(Tester):
         self.load_sstable_with_configuration(ks='"Keyspace1"', create_schema=create_schema_with_mv)
 
     def copy_sstables(self, cluster, node):
-        for x in xrange(0, cluster.data_dir_count):
+        for x in range(0, cluster.data_dir_count):
             data_dir = os.path.join(node.get_path(), 'data{0}'.format(x))
             copy_root = os.path.join(node.get_path(), 'data{0}_copy'.format(x))
             for ddir in os.listdir(data_dir):
                 keyspace_dir = os.path.join(data_dir, ddir)
                 if os.path.isdir(keyspace_dir) and ddir != 'system':
                     copy_dir = os.path.join(copy_root, ddir)
-                    dir_util.copy_tree(keyspace_dir, copy_dir)
+                    distutils.dir_util.copy_tree(keyspace_dir, copy_dir)
 
     def load_sstables(self, cluster, node, ks):
         cdir = node.get_install_dir()
         sstableloader = os.path.join(cdir, 'bin', ccmcommon.platform_binary('sstableloader'))
         env = ccmcommon.make_cassandra_env(cdir, node.get_path())
         host = node.address()
-        for x in xrange(0, cluster.data_dir_count):
+        for x in range(0, cluster.data_dir_count):
             sstablecopy_dir = os.path.join(node.get_path(), 'data{0}_copy'.format(x), ks.strip('"'))
             for cf_dir in os.listdir(sstablecopy_dir):
                 full_cf_dir = os.path.join(sstablecopy_dir, cf_dir)
                 if os.path.isdir(full_cf_dir):
                     cmd_args = [sstableloader, '--nodes', host, full_cf_dir]
                     p = subprocess.Popen(cmd_args, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env)
-                    exit_status = p.wait()
-                    debug('stdout: {out}'.format(out=p.stdout))
-                    debug('stderr: {err}'.format(err=p.stderr))
-                    self.assertEqual(0, exit_status,
-                                     "sstableloader exited with a non-zero status: {}".format(exit_status))
+                    stdout, stderr = p.communicate()
+                    exit_status = p.returncode
+                    logger.debug('stdout: {out}'.format(out=stdout.decode("utf-8")))
+                    logger.debug('stderr: {err}'.format(err=stderr.decode("utf-8")))
+                    assert 0 == exit_status, \
+                        "sstableloader exited with a non-zero status: {}".format(exit_status)
 
     def load_sstable_with_configuration(self, pre_compression=None, post_compression=None, ks="ks", create_schema=create_schema):
         """
@@ -109,24 +116,24 @@ class BaseSStableLoaderTest(Tester):
         NUM_KEYS = 1000
 
         for compression_option in (pre_compression, post_compression):
-            self.assertIn(compression_option, (None, 'Snappy', 'Deflate'))
+            assert compression_option in (None, 'Snappy', 'Deflate')
 
-        debug("Testing sstableloader with pre_compression=%s and post_compression=%s" % (pre_compression, post_compression))
+        logger.debug("Testing sstableloader with pre_compression=%s and post_compression=%s" % (pre_compression, post_compression))
         if self.upgrade_from:
-            debug("Testing sstableloader with upgrade_from=%s and compact=%s" % (self.upgrade_from, self.compact))
+            logger.debug("Testing sstableloader with upgrade_from=%s and compact=%s" % (self.upgrade_from, self.compact))
 
         cluster = self.cluster
         if self.upgrade_from:
-            debug("Generating sstables with version %s" % (self.upgrade_from))
+            logger.debug("Generating sstables with version %s" % (self.upgrade_from))
             default_install_dir = self.cluster.get_install_dir()
             # Forcing cluster version on purpose
             cluster.set_install_dir(version=self.upgrade_from)
-        debug("Using jvm_args={}".format(self.jvm_args))
+        logger.debug("Using jvm_args={}".format(self.jvm_args))
         cluster.populate(2).start(jvm_args=list(self.jvm_args))
         node1, node2 = cluster.nodelist()
         time.sleep(.5)
 
-        debug("creating keyspace and inserting")
+        logger.debug("creating keyspace and inserting")
         session = self.cql_connection(node1)
         self.create_schema(session, ks, pre_compression)
 
@@ -139,28 +146,28 @@ class BaseSStableLoaderTest(Tester):
         node2.nodetool('drain')
         node2.stop()
 
-        debug("Making a copy of the sstables")
+        logger.debug("Making a copy of the sstables")
         # make a copy of the sstables
         self.copy_sstables(cluster, node1)
 
-        debug("Wiping out the data and restarting cluster")
+        logger.debug("Wiping out the data and restarting cluster")
         # wipe out the node data.
         cluster.clear()
 
         if self.upgrade_from:
-            debug("Running sstableloader with version from %s" % (default_install_dir))
+            logger.debug("Running sstableloader with version from %s" % (default_install_dir))
             # Return to previous version
             cluster.set_install_dir(install_dir=default_install_dir)
 
         cluster.start(jvm_args=list(self.jvm_args))
         time.sleep(5)  # let gossip figure out what is going on
 
-        debug("re-creating the keyspace and column families.")
+        logger.debug("re-creating the keyspace and column families.")
         session = self.cql_connection(node1)
         self.create_schema(session, ks, post_compression)
         time.sleep(2)
 
-        debug("Calling sstableloader")
+        logger.debug("Calling sstableloader")
         # call sstableloader to re-load each cf.
         self.load_sstables(cluster, node1, ks)
 
@@ -171,42 +178,41 @@ class BaseSStableLoaderTest(Tester):
                 query = "SELECT * FROM counter1 WHERE KEY='{}'".format(i)
                 assert_one(session, query, [str(i), 1])
 
-        debug("Reading data back")
+        logger.debug("Reading data back")
         # Now we should have sstables with the loaded data, and the existing
         # data. Lets read it all to make sure it is all there.
         read_and_validate_data(session)
 
-        debug("scrubbing, compacting, and repairing")
+        logger.debug("scrubbing, compacting, and repairing")
         # do some operations and try reading the data again.
         node1.nodetool('scrub')
         node1.nodetool('compact')
         node1.nodetool('repair')
 
-        debug("Reading data back one more time")
+        logger.debug("Reading data back one more time")
         read_and_validate_data(session)
 
         # check that RewindableDataInputStreamPlus spill files are properly cleaned up
         if self.upgrade_from:
-            for x in xrange(0, cluster.data_dir_count):
+            for x in range(0, cluster.data_dir_count):
                 data_dir = os.path.join(node1.get_path(), 'data{0}'.format(x))
                 for ddir in os.listdir(data_dir):
                     keyspace_dir = os.path.join(data_dir, ddir)
                     temp_files = self.glob_data_dirs(os.path.join(keyspace_dir, '*', "tmp", "*.dat"))
-                    debug("temp files: " + str(temp_files))
-                    self.assertEquals(0, len(temp_files), "Temporary files were not cleaned up.")
+                    logger.debug("temp files: " + str(temp_files))
+                    assert 0 == len(temp_files), "Temporary files were not cleaned up."
 
 
-class TestSSTableGenerationAndLoading(BaseSStableLoaderTest):
-    __test__ = True
+class TestSSTableGenerationAndLoading(TestBaseSStableLoader):
 
-    def sstableloader_uppercase_keyspace_name_test(self):
+    def test_sstableloader_uppercase_keyspace_name(self):
         """
         Make sure sstableloader works with upper case keyspace
         @jira_ticket CASSANDRA-10806
         """
         self.load_sstable_with_configuration(ks='"Keyspace1"')
 
-    def incompressible_data_in_compressed_table_test(self):
+    def test_incompressible_data_in_compressed_table(self):
         """
         tests for the bug that caused #3370:
         https://issues.apache.org/jira/browse/CASSANDRA-3370
@@ -227,10 +233,10 @@ class TestSSTableGenerationAndLoading(BaseSStableLoaderTest):
         create_cf(session, 'cf', compression="Deflate")
 
         # make unique column names, and values that are incompressible
-        for col in xrange(10):
+        for col in range(10):
             col_name = str(col)
             col_val = os.urandom(5000)
-            col_val = col_val.encode('hex')
+            col_val = col_val.hex()
             cql = "UPDATE cf SET v='%s' WHERE KEY='0' AND c='%s'" % (col_val, col_name)
             # print cql
             session.execute(cql)
@@ -238,9 +244,9 @@ class TestSSTableGenerationAndLoading(BaseSStableLoaderTest):
         node1.flush()
         time.sleep(2)
         rows = list(session.execute("SELECT * FROM cf WHERE KEY = '0' AND c < '8'"))
-        self.assertGreater(len(rows), 0)
+        assert len(rows) > 0
 
-    def remove_index_file_test(self):
+    def test_remove_index_file(self):
         """
         tests for situations similar to that found in #343:
         https://issues.apache.org/jira/browse/CASSANDRA-343
@@ -280,9 +286,9 @@ class TestSSTableGenerationAndLoading(BaseSStableLoaderTest):
             for fname in os.listdir(path):
                 if fname.endswith('Data.db'):
                     data_found += 1
-        self.assertGreater(data_found, 0, "After removing index, filter, stats, and digest files, the data file was deleted!")
+        assert data_found > 0, "After removing index, filter, stats, and digest files, the data file was deleted!"
 
-    def sstableloader_with_mv_test(self):
+    def test_sstableloader_with_mv(self):
         """
         @jira_ticket CASSANDRA-11275
         """
@@ -296,7 +302,7 @@ class TestSSTableGenerationAndLoading(BaseSStableLoaderTest):
         self.load_sstable_with_configuration(ks='"Keyspace1"', create_schema=create_schema_with_mv)
 
     @since('4.0')
-    def sstableloader_with_failing_2i_test(self):
+    def test_sstableloader_with_failing_2i(self):
         """
         @jira_ticket CASSANDRA-10130
 
@@ -341,7 +347,7 @@ class TestSSTableGenerationAndLoading(BaseSStableLoaderTest):
 
         # Load SSTables with a failure during index creation
         node.byteman_submit(['./byteman/index_build_failure.btm'])
-        with self.assertRaises(Exception):
+        with pytest.raises(Exception):
             self.load_sstables(cluster, node, 'k')
 
         # Check that the index isn't marked as built and the old SSTable data has been loaded but not indexed

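A note on the sstableloader hunk above: replacing p.wait() with
p.communicate() is not just a python3 cosmetic change. When both stdout and
stderr are PIPEd, wait() can deadlock if the child fills a pipe buffer that
the parent never drains; communicate() reads both streams to EOF and then
reaps the child. A self-contained sketch of the safe pattern (the command
here is only illustrative):

    import subprocess

    # any sufficiently chatty child process can fill a pipe buffer
    p = subprocess.Popen(['echo', 'hello'],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()  # drains both pipes, then waits
    assert p.returncode == 0, stderr.decode('utf-8')
    print(stdout.decode('utf-8'))     # pipes yield bytes on python3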
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/sstablesplit_test.py
----------------------------------------------------------------------
diff --git a/sstablesplit_test.py b/sstablesplit_test.py
index 371f91a..382f618 100644
--- a/sstablesplit_test.py
+++ b/sstablesplit_test.py
@@ -1,15 +1,17 @@
-from __future__ import division
-
 import time
+import logging
+
 from math import floor
 from os.path import getsize
 
-from dtest import Tester, debug
+from dtest import Tester
+
+logger = logging.getLogger(__name__)
 
 
 class TestSSTableSplit(Tester):
 
-    def split_test(self):
+    def test_split(self):
         """
         Check that after running compaction, sstablesplit can successfully split
         the resultant sstable.  Check that split is reversible and that data is readable
@@ -20,7 +22,7 @@ class TestSSTableSplit(Tester):
         node = cluster.nodelist()[0]
         version = cluster.version()
 
-        debug("Run stress to insert data")
+        logger.debug("Run stress to insert data")
 
         node.stress(['write', 'n=1000', 'no-warmup', '-rate', 'threads=50',
                      '-col', 'n=FIXED(10)', 'SIZE=FIXED(1024)'])
@@ -30,20 +32,20 @@ class TestSSTableSplit(Tester):
         self._do_compaction(node)
         self._do_split(node, version)
 
-        debug("Run stress to ensure data is readable")
+        logger.debug("Run stress to ensure data is readable")
         node.stress(['read', 'n=1000', '-rate', 'threads=25',
                      '-col', 'n=FIXED(10)', 'SIZE=FIXED(1024)'])
 
     def _do_compaction(self, node):
-        debug("Compact sstables.")
+        logger.debug("Compact sstables.")
         node.flush()
         node.compact()
         keyspace = 'keyspace1'
         sstables = node.get_sstables(keyspace, '')
-        debug("Number of sstables after compaction: %s" % len(sstables))
+        logger.debug("Number of sstables after compaction: %s" % len(sstables))
 
     def _do_split(self, node, version):
-        debug("Run sstablesplit")
+        logger.debug("Run sstablesplit")
         time.sleep(5.0)
         node.stop()
 
@@ -55,7 +57,7 @@ class TestSSTableSplit(Tester):
         # get the initial sstables and their total size
         origsstables = node.get_sstables(keyspace, '')
         origsstable_size = sum([getsize(sstable) for sstable in origsstables])
-        debug("Original sstable and sizes before split: {}".format([(name, getsize(name)) for name in origsstables]))
+        logger.debug("Original sstable and sizes before split: {}".format([(name, getsize(name)) for name in origsstables]))
 
         # calculate the expected number of sstables post-split
         expected_num_sstables = floor(origsstable_size / expected_sstable_size)
@@ -65,24 +67,24 @@ class TestSSTableSplit(Tester):
                                        no_snapshot=True, debug=True)
 
         for (out, error, rc) in result:
-            debug("stdout: {}".format(out))
-            debug("stderr: {}".format(error))
-            debug("rc: {}".format(rc))
+            logger.debug("stdout: {}".format(out))
+            logger.debug("stderr: {}".format(error))
+            logger.debug("rc: {}".format(rc))
 
         # get the sstables post-split and their total size
         sstables = node.get_sstables(keyspace, '')
-        debug("Number of sstables after split: %s. expected %s" % (len(sstables), expected_num_sstables))
-        self.assertLessEqual(expected_num_sstables, len(sstables) + 1)
-        self.assertLessEqual(1, len(sstables))
+        logger.debug("Number of sstables after split: %s. expected %s" % (len(sstables), expected_num_sstables))
+        assert expected_num_sstables <= len(sstables) + 1
+        assert 1 <= len(sstables)
 
         # make sure none of the tables are bigger than the max expected size
         sstable_sizes = [getsize(sstable) for sstable in sstables]
         # add a bit extra for overhead
-        self.assertLessEqual(max(sstable_sizes), expected_sstable_size + 512)
+        assert max(sstable_sizes) <= expected_sstable_size + 512
         # make sure node can start with changed sstables
         node.start(wait_for_binary_proto=True)
 
-    def single_file_split_test(self):
+    def test_single_file_split(self):
         """
         Covers CASSANDRA-8623
 
@@ -92,7 +94,7 @@ class TestSSTableSplit(Tester):
         cluster.populate(1).start(wait_for_binary_proto=True)
         node = cluster.nodelist()[0]
 
-        debug("Run stress to insert data")
+        logger.debug("Run stress to insert data")
         node.stress(['write', 'n=300', 'no-warmup', '-rate', 'threads=50',
                      '-col', 'n=FIXED(10)', 'SIZE=FIXED(1024)'])
 
@@ -101,9 +103,9 @@ class TestSSTableSplit(Tester):
         result = node.run_sstablesplit(keyspace='keyspace1', size=1, no_snapshot=True)
 
         for (stdout, stderr, rc) in result:
-            debug(stderr)
-            failure = stderr.find("java.lang.AssertionError: Data component is missing")
-            self.assertEqual(failure, -1, "Error during sstablesplit")
+            logger.debug(stderr.decode("utf-8"))
+            failure = stderr.decode("utf-8").find("java.lang.AssertionError: Data component is missing")
+            assert failure == -1, "Error during sstablesplit"
 
         sstables = node.get_sstables('keyspace1', '')
-        self.assertGreaterEqual(len(sstables), 1, sstables)
+        assert len(sstables) >= 1, sstables

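The two assertions fixed at the end of this file show the classic pitfall of
mechanically translating unittest assertions: in "assert a, b" the comma
separates the condition from the message, so a misplaced comma asserts the
truthiness of a lone value (and -1 is truthy) instead of performing the
comparison. A small illustration of the corrected form:

    failure = "some stderr output".find("java.lang.AssertionError")  # -1 when absent
    # wrong: assert failure, -1 == "..."  only checks bool(failure); -1 is truthy
    # right: compare first, then give the message after the comma
    assert failure == -1, "Error during sstablesplit"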
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/sstableutil_test.py
----------------------------------------------------------------------
diff --git a/sstableutil_test.py b/sstableutil_test.py
index 0886a26..30584c1 100644
--- a/sstableutil_test.py
+++ b/sstableutil_test.py
@@ -2,14 +2,18 @@ import glob
 import os
 import subprocess
 import time
+import pytest
+import logging
 
 from ccmlib import common
 from ccmlib.node import ToolError
 
-from dtest import Tester, debug
-from tools.decorators import since
+from dtest import Tester
 from tools.intervention import InterruptCompaction
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
 # These must match the stress schema names
 KeyspaceName = 'keyspace1'
 TableName = 'standard1'
@@ -24,9 +28,9 @@ def _normcase_all(xs):
 
 
 @since('3.0')
-class SSTableUtilTest(Tester):
+class TestSSTableUtil(Tester):
 
-    def compaction_test(self):
+    def test_compaction(self):
         """
         @jira_ticket CASSANDRA-7066
 
@@ -38,14 +42,14 @@ class SSTableUtilTest(Tester):
 
         self._create_data(node, KeyspaceName, TableName, 100000)
         finalfiles, tmpfiles = self._check_files(node, KeyspaceName, TableName)
-        self.assertEqual(0, len(tmpfiles))
+        assert 0 == len(tmpfiles)
 
         node.compact()
         time.sleep(5)
         finalfiles, tmpfiles = self._check_files(node, KeyspaceName, TableName)
-        self.assertEqual(0, len(tmpfiles))
+        assert 0 == len(tmpfiles)
 
-    def abortedcompaction_test(self):
+    def test_abortedcompaction(self):
         """
         @jira_ticket CASSANDRA-7066
         @jira_ticket CASSANDRA-11497
@@ -61,14 +65,14 @@ class SSTableUtilTest(Tester):
 
         self._create_data(node, KeyspaceName, TableName, numrecords)
         finalfiles, tmpfiles = self._check_files(node, KeyspaceName, TableName)
-        self.assertTrue(len(finalfiles) > 0, "Expected to find some final files")
-        self.assertEqual(0, len(tmpfiles), "Expected no tmp files")
+        assert len(finalfiles) > 0, "Expected to find some final files"
+        assert 0 == len(tmpfiles), "Expected no tmp files"
 
         t = InterruptCompaction(node, TableName, filename=log_file_name, delay=2)
         t.start()
 
         try:
-            debug("Compacting...")
+            logger.debug("Compacting...")
             node.compact()
         except ToolError:
             pass  # expected to fail
@@ -81,7 +85,7 @@ class SSTableUtilTest(Tester):
         # In most cases we should end up with some temporary files to clean up, but it may happen
         # that no temporary files are created if compaction finishes too early or starts too late
         # see CASSANDRA-11497
-        debug("Got {} final files and {} tmp files after compaction was interrupted"
+        logger.debug("Got {} final files and {} tmp files after compaction was interrupted"
               .format(len(finalfiles), len(tmpfiles)))
 
         self._invoke_sstableutil(KeyspaceName, TableName, cleanup=True)
@@ -89,15 +93,15 @@ class SSTableUtilTest(Tester):
         self._check_files(node, KeyspaceName, TableName, finalfiles, [])
 
         # restart to make sure no data is lost
-        debug("Restarting node...")
+        logger.debug("Restarting node...")
         node.start(wait_for_binary_proto=True)
         # in some environments, a compaction may start that would change sstable files. We should wait if so
         node.wait_for_compactions()
 
         finalfiles, tmpfiles = self._check_files(node, KeyspaceName, TableName)
-        self.assertEqual(0, len(tmpfiles))
+        assert 0 == len(tmpfiles)
 
-        debug("Running stress to ensure data is readable")
+        logger.debug("Running stress to ensure data is readable")
         self._read_data(node, numrecords)
 
     def _create_data(self, node, ks, table, numrecords):
@@ -132,17 +136,17 @@ class SSTableUtilTest(Tester):
         else:
             expected_tmpfiles = _normcase_all(expected_tmpfiles)
 
-        debug("Comparing all files...")
-        self.assertEqual(sstablefiles, allfiles)
+        logger.debug("Comparing all files...")
+        assert sstablefiles == allfiles
 
-        debug("Comparing final files...")
-        self.assertEqual(expected_finalfiles, finalfiles)
+        logger.debug("Comparing final files...")
+        assert expected_finalfiles == finalfiles
 
-        debug("Comparing tmp files...")
-        self.assertEqual(expected_tmpfiles, tmpfiles)
+        logger.debug("Comparing tmp files...")
+        assert expected_tmpfiles == tmpfiles
 
-        debug("Comparing op logs...")
-        self.assertEqual(expected_oplogs, oplogs)
+        logger.debug("Comparing op logs...")
+        assert expected_oplogs == oplogs
 
         return finalfiles, tmpfiles
 
@@ -150,7 +154,7 @@ class SSTableUtilTest(Tester):
         """
         Invoke sstableutil and return the list of files, if any
         """
-        debug("About to invoke sstableutil with type {}...".format(type))
+        logger.debug("About to invoke sstableutil with type {}...".format(type))
         node1 = self.cluster.nodelist()[0]
         env = common.make_cassandra_env(node1.get_install_cassandra_root(), node1.get_node_cassandra_root())
         tool_bin = node1.get_tool('sstableutil')
@@ -168,14 +172,14 @@ class SSTableUtilTest(Tester):
 
         (stdout, stderr) = p.communicate()
 
-        self.assertEqual(p.returncode, 0, "Error invoking sstableutil; returned {code}".format(code=p.returncode))
+        assert p.returncode == 0, "Error invoking sstableutil; returned {code}".format(code=p.returncode)
 
         if stdout:
-            debug(stdout)
+            logger.debug(stdout.decode("utf-8"))
 
         match = ks + os.sep + table + '-'
-        ret = sorted(filter(lambda s: match in s, stdout.splitlines()))
-        debug("Got {} files of type {}".format(len(ret), type))
+        ret = sorted([s for s in stdout.decode("utf-8").splitlines() if match in s])
+        logger.debug("Got {} files of type {}".format(len(ret), type))
         return ret
 
     def _get_sstable_files(self, node, ks, table):
@@ -184,9 +188,10 @@ class SSTableUtilTest(Tester):
         """
         ret = []
         for data_dir in node.data_directories():
-            keyspace_dir = os.path.join(data_dir, ks)
+            # note: on macOS /var is a symlink to /private/var, so resolve it here to keep path comparisons consistent
+            keyspace_dir = os.path.abspath(os.path.join(data_dir, ks)).replace("/var/folders", "/private/var/folders")
             for ext in ('*.db', '*.txt', '*.adler32', '*.crc32'):
-                ret.extend(glob.glob(os.path.join(keyspace_dir, table + '-*', ext)))
+                ret.extend(glob.glob(os.path.abspath(os.path.join(keyspace_dir, table + '-*', ext))))
 
         return sorted(ret)
 

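Several hunks in this file add .decode("utf-8") before string operations:
on python3, Popen pipes return bytes, so substring tests and splitlines()
against str patterns need an explicit decode. A tiny illustration with a
made-up payload:

    stdout = b"keyspace1/standard1-x/mc-1-big-Data.db\nunrelated line\n"
    match = 'standard1-'
    ret = sorted(s for s in stdout.decode('utf-8').splitlines() if match in s)
    assert ret == ['keyspace1/standard1-x/mc-1-big-Data.db']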
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/stress_tool_test.py
----------------------------------------------------------------------
diff --git a/stress_tool_test.py b/stress_tool_test.py
index d7f43c0..9a1ccd2 100644
--- a/stress_tool_test.py
+++ b/stress_tool_test.py
@@ -1,8 +1,11 @@
-from __future__ import division
+import pytest
+import logging
 
 from dtest import Tester
 from tools.data import rows_to_list
-from tools.decorators import since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 @since('3.0')
@@ -13,7 +16,7 @@ class TestStressSparsenessRatio(Tester):
     Tests for the `row-population-ratio` parameter to `cassandra-stress`.
     """
 
-    def uniform_ratio_test(self):
+    def test_uniform_ratio(self):
         """
         Tests that the ratio-specifying string 'uniform(5..15)/50' results in
         ~80% of the values written being non-null.
@@ -22,7 +25,7 @@ class TestStressSparsenessRatio(Tester):
                                    expected_ratio=.8,
                                    delta=.1)
 
-    def fixed_ratio_test(self):
+    def test_fixed_ratio(self):
         """
         Tests that the string 'fixed(1)/3' results in ~1/3 of the values
         written being non-null.
@@ -50,4 +53,4 @@ class TestStressSparsenessRatio(Tester):
         num_nones = sum(row.count(None) for row in written)
         num_results = sum(len(row) for row in written)
 
-        self.assertAlmostEqual(float(num_nones) / num_results, expected_ratio, delta=delta)
+        assert abs(float(num_nones) / num_results - expected_ratio) <= delta

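The hand-rolled abs(...) <= delta comparison above is a faithful translation
of unittest's assertAlmostEqual(..., delta=...). pytest's own pytest.approx
expresses the same tolerance more directly; shown here only as an equivalent
alternative, not what the patch uses:

    import pytest

    ratio = 0.82
    # identical check to: assert abs(ratio - 0.8) <= 0.1
    assert ratio == pytest.approx(0.8, abs=0.1)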
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/super_column_cache_test.py
----------------------------------------------------------------------
diff --git a/super_column_cache_test.py b/super_column_cache_test.py
index 405d883..f0147a8 100644
--- a/super_column_cache_test.py
+++ b/super_column_cache_test.py
@@ -1,20 +1,32 @@
+import pytest
+import logging
+
+from dtest_setup_overrides import DTestSetupOverrides
+
 from dtest import Tester
-from thrift_bindings.v22.ttypes import \
+from thrift_bindings.thrift010.ttypes import \
     ConsistencyLevel as ThriftConsistencyLevel
-from thrift_bindings.v22.ttypes import (CfDef, Column, ColumnOrSuperColumn,
+from thrift_bindings.thrift010.ttypes import (CfDef, Column, ColumnOrSuperColumn,
                                         ColumnParent, KsDef, Mutation,
                                         SlicePredicate, SliceRange,
                                         SuperColumn)
-from thrift_tests import get_thrift_client
+from thrift_test import get_thrift_client
 from tools.misc import ImmutableMapping
-from tools.decorators import since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 @since('2.0', max_version='4')
 class TestSCCache(Tester):
-    cluster_options = ImmutableMapping({'start_rpc': 'true'})
 
-    def sc_with_row_cache_test(self):
+    @pytest.fixture(scope='function', autouse=True)
+    def fixture_dtest_setup_overrides(self):
+        dtest_setup_overrides = DTestSetupOverrides()
+        dtest_setup_overrides.cluster_options = ImmutableMapping({'start_rpc': 'true'})
+        return dtest_setup_overrides
+
+    def test_sc_with_row_cache(self):
         """ Test for bug reported in #4190 """
         cluster = self.cluster
 
@@ -57,12 +69,12 @@ class TestSCCache(Tester):
         column_parent = ColumnParent(column_family='Users')
         predicate = SlicePredicate(slice_range=SliceRange("", "", False, 100))
         super_columns = client.get_slice('mina', column_parent, predicate, ThriftConsistencyLevel.ONE)
-        self.assertEqual(1, len(super_columns))
+        assert 1 == len(super_columns)
         super_column = super_columns[0].super_column
-        self.assertEqual('attrs', super_column.name)
-        self.assertEqual(1, len(super_column.columns))
-        self.assertEqual('name', super_column.columns[0].name)
-        self.assertEqual('Mina', super_column.columns[0].value)
+        assert 'attrs' == super_column.name
+        assert 1 == len(super_column.columns)
+        assert 'name' == super_column.columns[0].name
+        assert 'Mina' == super_column.columns[0].value
 
         # add a 'country' subcolumn
         column = Column(name='country', value='Canada', timestamp=100)
@@ -71,16 +83,16 @@ class TestSCCache(Tester):
             ThriftConsistencyLevel.ONE)
 
         super_columns = client.get_slice('mina', column_parent, predicate, ThriftConsistencyLevel.ONE)
-        self.assertEqual(1, len(super_columns))
+        assert 1 == len(super_columns)
         super_column = super_columns[0].super_column
-        self.assertEqual('attrs', super_column.name)
-        self.assertEqual(2, len(super_column.columns))
+        assert 'attrs' == super_column.name
+        assert 2 == len(super_column.columns)
 
-        self.assertEqual('country', super_column.columns[0].name)
-        self.assertEqual('Canada', super_column.columns[0].value)
+        assert 'country' == super_column.columns[0].name
+        assert 'Canada' == super_column.columns[0].value
 
-        self.assertEqual('name', super_column.columns[1].name)
-        self.assertEqual('Mina', super_column.columns[1].value)
+        assert 'name' == super_column.columns[1].name
+        assert 'Mina' == super_column.columns[1].value
 
         # add a 'region' subcolumn
         column = Column(name='region', value='Quebec', timestamp=100)
@@ -89,16 +101,16 @@ class TestSCCache(Tester):
             ThriftConsistencyLevel.ONE)
 
         super_columns = client.get_slice('mina', column_parent, predicate, ThriftConsistencyLevel.ONE)
-        self.assertEqual(1, len(super_columns))
+        assert 1 == len(super_columns)
         super_column = super_columns[0].super_column
-        self.assertEqual('attrs', super_column.name)
-        self.assertEqual(3, len(super_column.columns))
+        assert 'attrs' == super_column.name
+        assert 3 == len(super_column.columns)
 
-        self.assertEqual('country', super_column.columns[0].name)
-        self.assertEqual('Canada', super_column.columns[0].value)
+        assert 'country' == super_column.columns[0].name
+        assert 'Canada' == super_column.columns[0].value
 
-        self.assertEqual('name', super_column.columns[1].name)
-        self.assertEqual('Mina', super_column.columns[1].value)
+        assert 'name' == super_column.columns[1].name
+        assert 'Mina' == super_column.columns[1].value
 
-        self.assertEqual('region', super_column.columns[2].name)
-        self.assertEqual('Quebec', super_column.columns[2].value)
+        assert 'region' == super_column.columns[2].name
+        assert 'Quebec' == super_column.columns[2].value

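The fixture_dtest_setup_overrides hook above is the migration's replacement
for class-level cluster_options: the dtest framework collects the returned
overrides before starting the cluster for each test. The general shape, for
any test class that needs non-default cluster options (class name and option
values here are illustrative):

    import pytest
    from dtest import Tester
    from dtest_setup_overrides import DTestSetupOverrides
    from tools.misc import ImmutableMapping

    class TestNeedsRpc(Tester):  # hypothetical test class
        @pytest.fixture(scope='function', autouse=True)
        def fixture_dtest_setup_overrides(self):
            overrides = DTestSetupOverrides()
            overrides.cluster_options = ImmutableMapping({'start_rpc': 'true'})
            return overrides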
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/super_counter_test.py
----------------------------------------------------------------------
diff --git a/super_counter_test.py b/super_counter_test.py
index 7a4c63b..b9ca007 100644
--- a/super_counter_test.py
+++ b/super_counter_test.py
@@ -1,12 +1,17 @@
 import time
+import pytest
+import logging
 
-from cql.cassandra.ttypes import (CfDef, ColumnParent, ColumnPath,
-                                  ConsistencyLevel, CounterColumn)
-from dtest import Tester, debug, create_ks
-from thrift_tests import get_thrift_client
+from dtest_setup_overrides import DTestSetupOverrides
+from dtest import Tester, create_ks
+from thrift_test import get_thrift_client
 from tools.misc import ImmutableMapping
 
-from tools.decorators import since
+from thrift_bindings.thrift010.Cassandra import (CfDef, ColumnParent, ColumnPath,
+                                                 ConsistencyLevel, CounterColumn)
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 @since('2.0', max_version='4')
@@ -15,9 +20,13 @@ class TestSuperCounterClusterRestart(Tester):
     This test is part of this issue:
     https://issues.apache.org/jira/browse/CASSANDRA-3821
     """
-    cluster_options = ImmutableMapping({'start_rpc': 'true'})
+    @pytest.fixture(scope='function', autouse=True)
+    def fixture_dtest_setup_overrides(self):
+        dtest_setup_overrides = DTestSetupOverrides()
+        dtest_setup_overrides.cluster_options = ImmutableMapping({'start_rpc': 'true'})
+        return dtest_setup_overrides
 
-    def functional_test(self):
+    def test_functional(self):
         NUM_SUBCOLS = 100
         NUM_ADDS = 100
 
@@ -42,8 +51,8 @@ class TestSuperCounterClusterRestart(Tester):
         # let the sediment settle to the bottom before drinking...
         time.sleep(2)
 
-        for subcol in xrange(NUM_SUBCOLS):
-            for add in xrange(NUM_ADDS):
+        for subcol in range(NUM_SUBCOLS):
+            for add in range(NUM_ADDS):
                 column_parent = ColumnParent(column_family='cf',
                                              super_column='subcol_%d' % subcol)
                 counter_column = CounterColumn('col_0', 1)
@@ -52,10 +61,10 @@ class TestSuperCounterClusterRestart(Tester):
         time.sleep(1)
         cluster.flush()
 
-        debug("Stopping cluster")
+        logger.debug("Stopping cluster")
         cluster.stop()
         time.sleep(5)
-        debug("Starting cluster")
+        logger.debug("Starting cluster")
         cluster.start()
         time.sleep(5)
 
@@ -65,17 +74,17 @@ class TestSuperCounterClusterRestart(Tester):
 
         from_db = []
 
-        for i in xrange(NUM_SUBCOLS):
+        for i in range(NUM_SUBCOLS):
             column_path = ColumnPath(column_family='cf', column='col_0',
                                      super_column='subcol_%d' % i)
             column_or_super_column = thrift_conn.get('row_0', column_path,
                                                      ConsistencyLevel.QUORUM)
             val = column_or_super_column.counter_column.value
-            debug(str(val)),
+            logger.debug(str(val))
             from_db.append(val)
-        debug("")
+        logger.debug("")
 
-        expected = [NUM_ADDS for i in xrange(NUM_SUBCOLS)]
+        expected = [NUM_ADDS for i in range(NUM_SUBCOLS)]
 
         if from_db != expected:
             raise Exception("Expected a bunch of the same values out of the db. Got this: " + str(from_db))

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/system_keyspaces_test.py
----------------------------------------------------------------------
diff --git a/system_keyspaces_test.py b/system_keyspaces_test.py
index 2a5c099..c8ddd77 100644
--- a/system_keyspaces_test.py
+++ b/system_keyspaces_test.py
@@ -1,7 +1,13 @@
+import pytest
+import logging
+
 from cassandra import Unauthorized
 from dtest import Tester
 from tools.assertions import assert_all, assert_exception, assert_none
-from tools.decorators import since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
 
 
 class TestSystemKeyspaces(Tester):

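As in the other migrated modules, tools.decorators.since is replaced by a
module-level alias to a pytest mark. Existing @since(...) decorations keep
working unchanged because a pytest mark accepts the same call syntax; the
skip-by-version logic lives in the project's conftest rather than in the
decorator itself. For instance (version string illustrative):

    import pytest

    since = pytest.mark.since  # alias used across the migrated modules

    @since('3.0')  # recorded as a mark; conftest decides whether to skip
    def test_something():
        assert True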
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/thrift_bindings/thrift010/Cassandra-remote
----------------------------------------------------------------------
diff --git a/thrift_bindings/thrift010/Cassandra-remote b/thrift_bindings/thrift010/Cassandra-remote
new file mode 100644
index 0000000..6f3daa9
--- /dev/null
+++ b/thrift_bindings/thrift010/Cassandra-remote
@@ -0,0 +1,425 @@
+#!/usr/bin/env python
+#
+# Autogenerated by Thrift Compiler (0.10.0)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+#  options string: py
+#
+
+import sys
+import pprint
+if sys.version_info[0] > 2:
+    from urllib.parse import urlparse
+else:
+    from urlparse import urlparse
+from thrift.transport import TTransport, TSocket, TSSLSocket, THttpClient
+from thrift.protocol.TBinaryProtocol import TBinaryProtocol
+
+from cassandra import Cassandra
+from cassandra.ttypes import *
+
+if len(sys.argv) <= 1 or sys.argv[1] == '--help':
+    print('')
+    print('Usage: ' + sys.argv[0] + ' [-h host[:port]] [-u url] [-f[ramed]] [-s[sl]] [-novalidate] [-ca_certs certs] [-keyfile keyfile] [-certfile certfile] function [arg1 [arg2...]]')
+    print('')
+    print('Functions:')
+    print('  void login(AuthenticationRequest auth_request)')
+    print('  void set_keyspace(string keyspace)')
+    print('  ColumnOrSuperColumn get(string key, ColumnPath column_path, ConsistencyLevel consistency_level)')
+    print('   get_slice(string key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)')
+    print('  i32 get_count(string key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)')
+    print('   multiget_slice( keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)')
+    print('   multiget_count( keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level)')
+    print('   get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)')
+    print('   get_paged_slice(string column_family, KeyRange range, string start_column, ConsistencyLevel consistency_level)')
+    print('   get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level)')
+    print('  void insert(string key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)')
+    print('  void add(string key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level)')
+    print('  CASResult cas(string key, string column_family,  expected,  updates, ConsistencyLevel serial_consistency_level, ConsistencyLevel commit_consistency_level)')
+    print('  void remove(string key, ColumnPath column_path, i64 timestamp, ConsistencyLevel consistency_level)')
+    print('  void remove_counter(string key, ColumnPath path, ConsistencyLevel consistency_level)')
+    print('  void batch_mutate( mutation_map, ConsistencyLevel consistency_level)')
+    print('  void atomic_batch_mutate( mutation_map, ConsistencyLevel consistency_level)')
+    print('  void truncate(string cfname)')
+    print('   get_multi_slice(MultiSliceRequest request)')
+    print('   describe_schema_versions()')
+    print('   describe_keyspaces()')
+    print('  string describe_cluster_name()')
+    print('  string describe_version()')
+    print('   describe_ring(string keyspace)')
+    print('   describe_local_ring(string keyspace)')
+    print('   describe_token_map()')
+    print('  string describe_partitioner()')
+    print('  string describe_snitch()')
+    print('  KsDef describe_keyspace(string keyspace)')
+    print('   describe_splits(string cfName, string start_token, string end_token, i32 keys_per_split)')
+    print('  string trace_next_query()')
+    print('   describe_splits_ex(string cfName, string start_token, string end_token, i32 keys_per_split)')
+    print('  string system_add_column_family(CfDef cf_def)')
+    print('  string system_drop_column_family(string column_family)')
+    print('  string system_add_keyspace(KsDef ks_def)')
+    print('  string system_drop_keyspace(string keyspace)')
+    print('  string system_update_keyspace(KsDef ks_def)')
+    print('  string system_update_column_family(CfDef cf_def)')
+    print('  CqlResult execute_cql_query(string query, Compression compression)')
+    print('  CqlResult execute_cql3_query(string query, Compression compression, ConsistencyLevel consistency)')
+    print('  CqlPreparedResult prepare_cql_query(string query, Compression compression)')
+    print('  CqlPreparedResult prepare_cql3_query(string query, Compression compression)')
+    print('  CqlResult execute_prepared_cql_query(i32 itemId,  values)')
+    print('  CqlResult execute_prepared_cql3_query(i32 itemId,  values, ConsistencyLevel consistency)')
+    print('  void set_cql_version(string version)')
+    print('')
+    sys.exit(0)
+
+pp = pprint.PrettyPrinter(indent=2)
+host = 'localhost'
+port = 9090
+uri = ''
+framed = False
+ssl = False
+validate = True
+ca_certs = None
+keyfile = None
+certfile = None
+http = False
+argi = 1
+
+if sys.argv[argi] == '-h':
+    parts = sys.argv[argi + 1].split(':')
+    host = parts[0]
+    if len(parts) > 1:
+        port = int(parts[1])
+    argi += 2
+
+if sys.argv[argi] == '-u':
+    url = urlparse(sys.argv[argi + 1])
+    parts = url[1].split(':')
+    host = parts[0]
+    if len(parts) > 1:
+        port = int(parts[1])
+    else:
+        port = 80
+    uri = url[2]
+    if url[4]:
+        uri += '?%s' % url[4]
+    http = True
+    argi += 2
+
+if sys.argv[argi] == '-f' or sys.argv[argi] == '-framed':
+    framed = True
+    argi += 1
+
+if sys.argv[argi] == '-s' or sys.argv[argi] == '-ssl':
+    ssl = True
+    argi += 1
+
+if sys.argv[argi] == '-novalidate':
+    validate = False
+    argi += 1
+
+if sys.argv[argi] == '-ca_certs':
+    ca_certs = sys.argv[argi+1]
+    argi += 2
+
+if sys.argv[argi] == '-keyfile':
+    keyfile = sys.argv[argi+1]
+    argi += 2
+
+if sys.argv[argi] == '-certfile':
+    certfile = sys.argv[argi+1]
+    argi += 2
+
+cmd = sys.argv[argi]
+args = sys.argv[argi + 1:]
+
+if http:
+    transport = THttpClient.THttpClient(host, port, uri)
+else:
+    if ssl:
+        socket = TSSLSocket.TSSLSocket(host, port, validate=validate, ca_certs=ca_certs, keyfile=keyfile, certfile=certfile)
+    else:
+        socket = TSocket.TSocket(host, port)
+    if framed:
+        transport = TTransport.TFramedTransport(socket)
+    else:
+        transport = TTransport.TBufferedTransport(socket)
+protocol = TBinaryProtocol(transport)
+client = Cassandra.Client(protocol)
+transport.open()
+
+if cmd == 'login':
+    if len(args) != 1:
+        print('login requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.login(eval(args[0]),))
+
+elif cmd == 'set_keyspace':
+    if len(args) != 1:
+        print('set_keyspace requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.set_keyspace(args[0],))
+
+elif cmd == 'get':
+    if len(args) != 3:
+        print('get requires 3 args')
+        sys.exit(1)
+    pp.pprint(client.get(args[0], eval(args[1]), eval(args[2]),))
+
+elif cmd == 'get_slice':
+    if len(args) != 4:
+        print('get_slice requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.get_slice(args[0], eval(args[1]), eval(args[2]), eval(args[3]),))
+
+elif cmd == 'get_count':
+    if len(args) != 4:
+        print('get_count requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.get_count(args[0], eval(args[1]), eval(args[2]), eval(args[3]),))
+
+elif cmd == 'multiget_slice':
+    if len(args) != 4:
+        print('multiget_slice requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.multiget_slice(eval(args[0]), eval(args[1]), eval(args[2]), eval(args[3]),))
+
+elif cmd == 'multiget_count':
+    if len(args) != 4:
+        print('multiget_count requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.multiget_count(eval(args[0]), eval(args[1]), eval(args[2]), eval(args[3]),))
+
+elif cmd == 'get_range_slices':
+    if len(args) != 4:
+        print('get_range_slices requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.get_range_slices(eval(args[0]), eval(args[1]), eval(args[2]), eval(args[3]),))
+
+elif cmd == 'get_paged_slice':
+    if len(args) != 4:
+        print('get_paged_slice requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.get_paged_slice(args[0], eval(args[1]), args[2], eval(args[3]),))
+
+elif cmd == 'get_indexed_slices':
+    if len(args) != 4:
+        print('get_indexed_slices requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.get_indexed_slices(eval(args[0]), eval(args[1]), eval(args[2]), eval(args[3]),))
+
+elif cmd == 'insert':
+    if len(args) != 4:
+        print('insert requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.insert(args[0], eval(args[1]), eval(args[2]), eval(args[3]),))
+
+elif cmd == 'add':
+    if len(args) != 4:
+        print('add requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.add(args[0], eval(args[1]), eval(args[2]), eval(args[3]),))
+
+elif cmd == 'cas':
+    if len(args) != 6:
+        print('cas requires 6 args')
+        sys.exit(1)
+    pp.pprint(client.cas(args[0], args[1], eval(args[2]), eval(args[3]), eval(args[4]), eval(args[5]),))
+
+elif cmd == 'remove':
+    if len(args) != 4:
+        print('remove requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.remove(args[0], eval(args[1]), eval(args[2]), eval(args[3]),))
+
+elif cmd == 'remove_counter':
+    if len(args) != 3:
+        print('remove_counter requires 3 args')
+        sys.exit(1)
+    pp.pprint(client.remove_counter(args[0], eval(args[1]), eval(args[2]),))
+
+elif cmd == 'batch_mutate':
+    if len(args) != 2:
+        print('batch_mutate requires 2 args')
+        sys.exit(1)
+    pp.pprint(client.batch_mutate(eval(args[0]), eval(args[1]),))
+
+elif cmd == 'atomic_batch_mutate':
+    if len(args) != 2:
+        print('atomic_batch_mutate requires 2 args')
+        sys.exit(1)
+    pp.pprint(client.atomic_batch_mutate(eval(args[0]), eval(args[1]),))
+
+elif cmd == 'truncate':
+    if len(args) != 1:
+        print('truncate requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.truncate(args[0],))
+
+elif cmd == 'get_multi_slice':
+    if len(args) != 1:
+        print('get_multi_slice requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.get_multi_slice(eval(args[0]),))
+
+elif cmd == 'describe_schema_versions':
+    if len(args) != 0:
+        print('describe_schema_versions requires 0 args')
+        sys.exit(1)
+    pp.pprint(client.describe_schema_versions())
+
+elif cmd == 'describe_keyspaces':
+    if len(args) != 0:
+        print('describe_keyspaces requires 0 args')
+        sys.exit(1)
+    pp.pprint(client.describe_keyspaces())
+
+elif cmd == 'describe_cluster_name':
+    if len(args) != 0:
+        print('describe_cluster_name requires 0 args')
+        sys.exit(1)
+    pp.pprint(client.describe_cluster_name())
+
+elif cmd == 'describe_version':
+    if len(args) != 0:
+        print('describe_version requires 0 args')
+        sys.exit(1)
+    pp.pprint(client.describe_version())
+
+elif cmd == 'describe_ring':
+    if len(args) != 1:
+        print('describe_ring requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.describe_ring(args[0],))
+
+elif cmd == 'describe_local_ring':
+    if len(args) != 1:
+        print('describe_local_ring requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.describe_local_ring(args[0],))
+
+elif cmd == 'describe_token_map':
+    if len(args) != 0:
+        print('describe_token_map requires 0 args')
+        sys.exit(1)
+    pp.pprint(client.describe_token_map())
+
+elif cmd == 'describe_partitioner':
+    if len(args) != 0:
+        print('describe_partitioner requires 0 args')
+        sys.exit(1)
+    pp.pprint(client.describe_partitioner())
+
+elif cmd == 'describe_snitch':
+    if len(args) != 0:
+        print('describe_snitch requires 0 args')
+        sys.exit(1)
+    pp.pprint(client.describe_snitch())
+
+elif cmd == 'describe_keyspace':
+    if len(args) != 1:
+        print('describe_keyspace requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.describe_keyspace(args[0],))
+
+elif cmd == 'describe_splits':
+    if len(args) != 4:
+        print('describe_splits requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.describe_splits(args[0], args[1], args[2], eval(args[3]),))
+
+elif cmd == 'trace_next_query':
+    if len(args) != 0:
+        print('trace_next_query requires 0 args')
+        sys.exit(1)
+    pp.pprint(client.trace_next_query())
+
+elif cmd == 'describe_splits_ex':
+    if len(args) != 4:
+        print('describe_splits_ex requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.describe_splits_ex(args[0], args[1], args[2], eval(args[3]),))
+
+elif cmd == 'system_add_column_family':
+    if len(args) != 1:
+        print('system_add_column_family requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.system_add_column_family(eval(args[0]),))
+
+elif cmd == 'system_drop_column_family':
+    if len(args) != 1:
+        print('system_drop_column_family requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.system_drop_column_family(args[0],))
+
+elif cmd == 'system_add_keyspace':
+    if len(args) != 1:
+        print('system_add_keyspace requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.system_add_keyspace(eval(args[0]),))
+
+elif cmd == 'system_drop_keyspace':
+    if len(args) != 1:
+        print('system_drop_keyspace requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.system_drop_keyspace(args[0],))
+
+elif cmd == 'system_update_keyspace':
+    if len(args) != 1:
+        print('system_update_keyspace requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.system_update_keyspace(eval(args[0]),))
+
+elif cmd == 'system_update_column_family':
+    if len(args) != 1:
+        print('system_update_column_family requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.system_update_column_family(eval(args[0]),))
+
+elif cmd == 'execute_cql_query':
+    if len(args) != 2:
+        print('execute_cql_query requires 2 args')
+        sys.exit(1)
+    pp.pprint(client.execute_cql_query(args[0], eval(args[1]),))
+
+elif cmd == 'execute_cql3_query':
+    if len(args) != 3:
+        print('execute_cql3_query requires 3 args')
+        sys.exit(1)
+    pp.pprint(client.execute_cql3_query(args[0], eval(args[1]), eval(args[2]),))
+
+elif cmd == 'prepare_cql_query':
+    if len(args) != 2:
+        print('prepare_cql_query requires 2 args')
+        sys.exit(1)
+    pp.pprint(client.prepare_cql_query(args[0], eval(args[1]),))
+
+elif cmd == 'prepare_cql3_query':
+    if len(args) != 2:
+        print('prepare_cql3_query requires 2 args')
+        sys.exit(1)
+    pp.pprint(client.prepare_cql3_query(args[0], eval(args[1]),))
+
+elif cmd == 'execute_prepared_cql_query':
+    if len(args) != 2:
+        print('execute_prepared_cql_query requires 2 args')
+        sys.exit(1)
+    pp.pprint(client.execute_prepared_cql_query(eval(args[0]), eval(args[1]),))
+
+elif cmd == 'execute_prepared_cql3_query':
+    if len(args) != 3:
+        print('execute_prepared_cql3_query requires 3 args')
+        sys.exit(1)
+    pp.pprint(client.execute_prepared_cql3_query(eval(args[0]), eval(args[1]), eval(args[2]),))
+
+elif cmd == 'set_cql_version':
+    if len(args) != 1:
+        print('set_cql_version requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.set_cql_version(args[0],))
+
+else:
+    print('Unrecognized method %s' % cmd)
+    sys.exit(1)
+
+transport.close()
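
For reference, the block above is the tail of the stock Thrift-generated Cassandra-remote dispatcher: each branch checks its argument count, eval()s any structured arguments, and pretty-prints the result of the client call. A hedged usage sketch follows; the script path, host, and port are assumptions rather than values taken from this commit:

    # query a keyspace definition through the generated CLI (framed transport via -f)
    python thrift_bindings/thrift010/Cassandra-remote -h 127.0.0.1:9160 -f describe_keyspace system

Because the dispatcher eval()s structured arguments, this is convenient for ad-hoc debugging but should never be fed untrusted input.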




[15/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/thrift_bindings/thrift010/constants.py
----------------------------------------------------------------------
diff --git a/thrift_bindings/thrift010/constants.py b/thrift_bindings/thrift010/constants.py
new file mode 100644
index 0000000..97d1f20
--- /dev/null
+++ b/thrift_bindings/thrift010/constants.py
@@ -0,0 +1,13 @@
+#
+# Autogenerated by Thrift Compiler (0.10.0)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+#  options string: py
+#
+
+from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
+from thrift.protocol.TProtocol import TProtocolException
+import sys
+from .ttypes import *
+VERSION = "20.1.0"




[02/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/upgrade_tests/thrift_upgrade_test.py
----------------------------------------------------------------------
diff --git a/upgrade_tests/thrift_upgrade_test.py b/upgrade_tests/thrift_upgrade_test.py
index 0943d8a..42343a2 100644
--- a/upgrade_tests/thrift_upgrade_test.py
+++ b/upgrade_tests/thrift_upgrade_test.py
@@ -1,21 +1,21 @@
-# coding: utf-8
-
 import itertools
-from unittest import skipUnless
+import pytest
+import logging
 
 from cassandra.query import dict_factory
-from nose.tools import assert_equal, assert_not_in
 
-from dtest import RUN_STATIC_UPGRADE_MATRIX, Tester, debug
-from thrift_bindings.v22 import Cassandra
-from thrift_bindings.v22.Cassandra import (Column, ColumnDef,
+from dtest import RUN_STATIC_UPGRADE_MATRIX, Tester
+from thrift_bindings.thrift010 import Cassandra
+from thrift_bindings.thrift010.Cassandra import (Column, ColumnDef,
                                            ColumnParent, ConsistencyLevel,
                                            SlicePredicate, SliceRange)
-from thrift_tests import _i64, get_thrift_client
+from thrift_test import _i64, get_thrift_client
 from tools.assertions import assert_length_equal
-from tools.decorators import since
-from upgrade_base import UpgradeTester
-from upgrade_manifest import build_upgrade_pairs
+from .upgrade_base import UpgradeTester
+from .upgrade_manifest import build_upgrade_pairs
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 def _create_dense_super_cf(name):
@@ -36,20 +36,20 @@ def _create_sparse_super_cf(name):
                            subcomparator_type='AsciiType')
 
 
-def _validate_sparse_cql(cursor, cf='sparse_super_1', column1=u'column1', col1=u'col1', col2=u'col2', key='key'):
+def _validate_sparse_cql(cursor, cf='sparse_super_1', column1='column1', col1='col1', col2='col2', key='key'):
     cursor.execute('use ks')
 
-    assert_equal(list(cursor.execute("SELECT * FROM {}".format(cf))),
+    assert (list(cursor.execute("SELECT * FROM {}".format(cf))) ==
                  [{key: 'k1', column1: 'key1', col1: 200, col2: 300},
                   {key: 'k1', column1: 'key2', col1: 200, col2: 300},
                   {key: 'k2', column1: 'key1', col1: 200, col2: 300},
                   {key: 'k2', column1: 'key2', col1: 200, col2: 300}])
 
-    assert_equal(list(cursor.execute("SELECT * FROM {} WHERE {} = 'k1'".format(cf, key))),
+    assert (list(cursor.execute("SELECT * FROM {} WHERE {} = 'k1'".format(cf, key))) ==
                  [{key: 'k1', column1: 'key1', col1: 200, col2: 300},
                   {key: 'k1', column1: 'key2', col1: 200, col2: 300}])
 
-    assert_equal(list(cursor.execute("SELECT * FROM {} WHERE {} = 'k2' AND {} = 'key1'".format(cf, key, column1))),
+    assert (list(cursor.execute("SELECT * FROM {} WHERE {} = 'k2' AND {} = 'key1'".format(cf, key, column1))) ==
                  [{key: 'k2', column1: 'key1', col1: 200, col2: 300}])
 
 
@@ -58,35 +58,35 @@ def _validate_sparse_thrift(client, cf='sparse_super_1'):
     client.set_keyspace('ks')
     result = client.get_slice('k1', ColumnParent(cf), SlicePredicate(slice_range=SliceRange('', '', False, 5)), ConsistencyLevel.ONE)
     assert_length_equal(result, 2)
-    assert_equal(result[0].super_column.name, 'key1')
-    assert_equal(result[1].super_column.name, 'key2')
+    assert result[0].super_column.name == 'key1'
+    assert result[1].super_column.name == 'key2'
 
     for cosc in result:
-        assert_equal(cosc.super_column.columns[0].name, 'col1')
-        assert_equal(cosc.super_column.columns[0].value, _i64(200))
-        assert_equal(cosc.super_column.columns[1].name, 'col2')
-        assert_equal(cosc.super_column.columns[1].value, _i64(300))
-        assert_equal(cosc.super_column.columns[2].name, 'value1')
-        assert_equal(cosc.super_column.columns[2].value, _i64(100))
+        assert cosc.super_column.columns[0].name == 'col1'
+        assert cosc.super_column.columns[0].value == _i64(200)
+        assert cosc.super_column.columns[1].name == 'col2'
+        assert cosc.super_column.columns[1].value == _i64(300)
+        assert cosc.super_column.columns[2].name == 'value1'
+        assert cosc.super_column.columns[2].value == _i64(100)
 
 
-def _validate_dense_cql(cursor, cf='dense_super_1', key=u'key', column1=u'column1', column2=u'column2', value=u'value'):
+def _validate_dense_cql(cursor, cf='dense_super_1', key='key', column1='column1', column2='column2', value='value'):
     cursor.execute('use ks')
 
-    assert_equal(list(cursor.execute("SELECT * FROM {}".format(cf))),
+    assert (list(cursor.execute("SELECT * FROM {}".format(cf))) ==
                  [{key: 'k1', column1: 'key1', column2: 100, value: 'value1'},
                   {key: 'k1', column1: 'key2', column2: 100, value: 'value1'},
                   {key: 'k2', column1: 'key1', column2: 200, value: 'value2'},
                   {key: 'k2', column1: 'key2', column2: 200, value: 'value2'}])
 
-    assert_equal(list(cursor.execute("SELECT * FROM {} WHERE {} = 'k1'".format(cf, key))),
+    assert (list(cursor.execute("SELECT * FROM {} WHERE {} = 'k1'".format(cf, key))) ==
                  [{key: 'k1', column1: 'key1', column2: 100, value: 'value1'},
                   {key: 'k1', column1: 'key2', column2: 100, value: 'value1'}])
 
-    assert_equal(list(cursor.execute("SELECT * FROM {} WHERE {} = 'k1' AND {} = 'key1'".format(cf, key, column1))),
+    assert (list(cursor.execute("SELECT * FROM {} WHERE {} = 'k1' AND {} = 'key1'".format(cf, key, column1))) ==
                  [{key: 'k1', column1: 'key1', column2: 100, value: 'value1'}])
 
-    assert_equal(list(cursor.execute("SELECT * FROM {} WHERE {} = 'k1' AND {} = 'key1' AND {} = 100".format(cf, key, column1, column2))),
+    assert (list(cursor.execute("SELECT * FROM {} WHERE {} = 'k1' AND {} = 'key1' AND {} = 100".format(cf, key, column1, column2))) ==
                  [{key: 'k1', column1: 'key1', column2: 100, value: 'value1'}])
 
 
@@ -95,24 +95,25 @@ def _validate_dense_thrift(client, cf='dense_super_1'):
     client.set_keyspace('ks')
     result = client.get_slice('k1', ColumnParent(cf), SlicePredicate(slice_range=SliceRange('', '', False, 5)), ConsistencyLevel.ONE)
     assert_length_equal(result, 2)
-    assert_equal(result[0].super_column.name, 'key1')
-    assert_equal(result[1].super_column.name, 'key2')
+    assert result[0].super_column.name == 'key1'
+    assert result[1].super_column.name == 'key2'
 
-    print(result[0])
-    print(result[1])
+    logger.debug(result[0])
+    logger.debug(result[1])
     for cosc in result:
-        assert_equal(cosc.super_column.columns[0].name, _i64(100))
-        assert_equal(cosc.super_column.columns[0].value, 'value1')
+        assert cosc.super_column.columns[0].name == _i64(100)
+        assert cosc.super_column.columns[0].value == 'value1'
 
 
-class UpgradeSuperColumnsThrough(Tester):
+@pytest.mark.upgrade_test
+class TestUpgradeSuperColumnsThrough(Tester):
     def upgrade_to_version(self, tag, nodes=None):
-        debug('Upgrading to ' + tag)
+        logger.debug('Upgrading to ' + tag)
         if nodes is None:
             nodes = self.cluster.nodelist()
 
         for node in nodes:
-            debug('Shutting down node: ' + node.name)
+            logger.debug('Shutting down node: ' + node.name)
             node.drain()
             node.watch_log_for("DRAINED")
             node.stop(wait_other_notice=False)
@@ -121,12 +122,12 @@ class UpgradeSuperColumnsThrough(Tester):
         for node in nodes:
             node.set_install_dir(version=tag)
             node.set_configuration_options(values={'start_rpc': 'true'})
-            debug("Set new cassandra dir for %s: %s" % (node.name, node.get_install_dir()))
+            logger.debug("Set new cassandra dir for %s: %s" % (node.name, node.get_install_dir()))
         self.cluster.set_install_dir(version=tag)
 
         # Restart nodes on new version
         for node in nodes:
-            debug('Starting %s on new version (%s)' % (node.name, tag))
+            logger.debug('Starting %s on new version (%s)' % (node.name, tag))
             # Setup log4j / logback again (necessary moving from 2.0 -> 2.1):
             node.set_log_level("INFO")
             node.start(wait_other_notice=True, wait_for_binary_proto=True)
@@ -145,7 +146,7 @@ class UpgradeSuperColumnsThrough(Tester):
         cluster.start()
         return cluster
 
-    def dense_supercolumn_3_0_created_test(self):
+    def test_dense_supercolumn_3_0_created(self):
         cluster = self.prepare(cassandra_version='github:apache/cassandra-3.0')
         node = self.cluster.nodelist()[0]
         cursor = self.patient_cql_connection(node, row_factory=dict_factory)
@@ -160,7 +161,7 @@ class UpgradeSuperColumnsThrough(Tester):
 
         client.system_add_column_family(_create_dense_super_cf('dense_super_1'))
 
-        for i in xrange(1, 3):
+        for i in range(1, 3):
             client.insert('k1', ColumnParent('dense_super_1', 'key{}'.format(i)), Column(_i64(100), 'value1', 0), ConsistencyLevel.ONE)
             client.insert('k2', ColumnParent('dense_super_1', 'key{}'.format(i)), Column(_i64(200), 'value2', 0), ConsistencyLevel.ONE)
 
@@ -177,7 +178,7 @@ class UpgradeSuperColumnsThrough(Tester):
         _validate_dense_thrift(client, cf='dense_super_1')
         _validate_dense_cql(cursor, cf='dense_super_1')
 
-    def dense_supercolumn_test(self):
+    def test_dense_supercolumn(self):
         cluster = self.prepare()
         node = self.cluster.nodelist()[0]
         node.nodetool("enablethrift")
@@ -193,7 +194,7 @@ class UpgradeSuperColumnsThrough(Tester):
 
         client.system_add_column_family(_create_dense_super_cf('dense_super_1'))
 
-        for i in xrange(1, 3):
+        for i in range(1, 3):
             client.insert('k1', ColumnParent('dense_super_1', 'key{}'.format(i)), Column(_i64(100), 'value1', 0), ConsistencyLevel.ONE)
             client.insert('k2', ColumnParent('dense_super_1', 'key{}'.format(i)), Column(_i64(200), 'value2', 0), ConsistencyLevel.ONE)
 
@@ -218,7 +219,7 @@ class UpgradeSuperColumnsThrough(Tester):
         _validate_dense_thrift(client, cf='dense_super_1')
         _validate_dense_cql(cursor, cf='dense_super_1')
 
-    def sparse_supercolumn_test(self):
+    def test_sparse_supercolumn(self):
         cluster = self.prepare()
         node = self.cluster.nodelist()[0]
         node.nodetool("enablethrift")
@@ -235,7 +236,7 @@ class UpgradeSuperColumnsThrough(Tester):
         cf = _create_sparse_super_cf('sparse_super_2')
         client.system_add_column_family(cf)
 
-        for i in xrange(1, 3):
+        for i in range(1, 3):
             client.insert('k1', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("value1", _i64(100), 0), ConsistencyLevel.ONE)
             client.insert('k1', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("col1", _i64(200), 0), ConsistencyLevel.ONE)
             client.insert('k1', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("col2", _i64(300), 0), ConsistencyLevel.ONE)
@@ -266,6 +267,7 @@ class UpgradeSuperColumnsThrough(Tester):
         _validate_sparse_cql(cursor, cf='sparse_super_2')
 
 
+@pytest.mark.upgrade_test
 @since('2.1', max_version='4.0.0')
 class TestThrift(UpgradeTester):
     """
@@ -275,7 +277,7 @@ class TestThrift(UpgradeTester):
     @jira_ticket CASSANDRA-12373
     """
 
-    def dense_supercolumn_test(self):
+    def test_dense_supercolumn(self):
         cursor = self.prepare(nodes=2, rf=2, row_factory=dict_factory)
         cluster = self.cluster
 
@@ -289,7 +291,7 @@ class TestThrift(UpgradeTester):
 
         client.system_add_column_family(_create_dense_super_cf('dense_super_1'))
 
-        for i in xrange(1, 3):
+        for i in range(1, 3):
             client.insert('k1', ColumnParent('dense_super_1', 'key{}'.format(i)), Column(_i64(100), 'value1', 0), ConsistencyLevel.ONE)
             client.insert('k2', ColumnParent('dense_super_1', 'key{}'.format(i)), Column(_i64(200), 'value2', 0), ConsistencyLevel.ONE)
 
@@ -297,12 +299,12 @@ class TestThrift(UpgradeTester):
         _validate_dense_thrift(client)
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory, use_thrift=True):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             client = get_thrift_client(host, port)
             _validate_dense_cql(cursor)
             _validate_dense_thrift(client)
 
-    def dense_supercolumn_test_with_renames(self):
+    def test_dense_supercolumn_with_renames(self):
         cursor = self.prepare(row_factory=dict_factory)
         cluster = self.cluster
 
@@ -317,7 +319,7 @@ class TestThrift(UpgradeTester):
 
         client.system_add_column_family(_create_dense_super_cf('dense_super_2'))
 
-        for i in xrange(1, 3):
+        for i in range(1, 3):
             client.insert('k1', ColumnParent('dense_super_2', 'key{}'.format(i)), Column(_i64(100), 'value1', 0), ConsistencyLevel.ONE)
             client.insert('k2', ColumnParent('dense_super_2', 'key{}'.format(i)), Column(_i64(200), 'value2', 0), ConsistencyLevel.ONE)
 
@@ -326,16 +328,16 @@ class TestThrift(UpgradeTester):
         cursor.execute("ALTER TABLE ks.dense_super_2 RENAME column2 TO renamed_column2")
         cursor.execute("ALTER TABLE ks.dense_super_2 RENAME value TO renamed_value")
 
-        _validate_dense_cql(cursor, cf='dense_super_2', key=u'renamed_key', column1=u'renamed_column1', column2=u'renamed_column2', value=u'renamed_value')
+        _validate_dense_cql(cursor, cf='dense_super_2', key='renamed_key', column1='renamed_column1', column2='renamed_column2', value='renamed_value')
         _validate_dense_thrift(client, cf='dense_super_2')
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory, use_thrift=True):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             client = get_thrift_client(host, port)
-            _validate_dense_cql(cursor, cf='dense_super_2', key=u'renamed_key', column1=u'renamed_column1', column2=u'renamed_column2', value=u'renamed_value')
+            _validate_dense_cql(cursor, cf='dense_super_2', key='renamed_key', column1='renamed_column1', column2='renamed_column2', value='renamed_value')
             _validate_dense_thrift(client, cf='dense_super_2')
 
-    def sparse_supercolumn_test_with_renames(self):
+    def test_sparse_supercolumn_with_renames(self):
         cursor = self.prepare(row_factory=dict_factory)
         cluster = self.cluster
 
@@ -354,7 +356,7 @@ class TestThrift(UpgradeTester):
         cursor.execute("ALTER TABLE ks.sparse_super_1 RENAME key TO renamed_key")
         cursor.execute("ALTER TABLE ks.sparse_super_1 RENAME column1 TO renamed_column1")
 
-        for i in xrange(1, 3):
+        for i in range(1, 3):
             client.insert('k1', ColumnParent('sparse_super_1', 'key{}'.format(i)), Column("value1", _i64(100), 0), ConsistencyLevel.ONE)
             client.insert('k1', ColumnParent('sparse_super_1', 'key{}'.format(i)), Column("col1", _i64(200), 0), ConsistencyLevel.ONE)
             client.insert('k1', ColumnParent('sparse_super_1', 'key{}'.format(i)), Column("col2", _i64(300), 0), ConsistencyLevel.ONE)
@@ -364,15 +366,15 @@ class TestThrift(UpgradeTester):
             client.insert('k2', ColumnParent('sparse_super_1', 'key{}'.format(i)), Column("col2", _i64(300), 0), ConsistencyLevel.ONE)
 
         _validate_sparse_thrift(client)
-        _validate_sparse_cql(cursor, column1=u'renamed_column1', key=u'renamed_key')
+        _validate_sparse_cql(cursor, column1='renamed_column1', key='renamed_key')
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory, use_thrift=True):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             client = get_thrift_client(host, port)
-            _validate_sparse_cql(cursor, column1=u'renamed_column1', key=u'renamed_key')
+            _validate_sparse_cql(cursor, column1='renamed_column1', key='renamed_key')
             _validate_sparse_thrift(client)
 
-    def sparse_supercolumn_test(self):
+    def test_sparse_supercolumn(self):
         cursor = self.prepare(row_factory=dict_factory)
         cluster = self.cluster
 
@@ -388,7 +390,7 @@ class TestThrift(UpgradeTester):
         cf = _create_sparse_super_cf('sparse_super_2')
         client.system_add_column_family(cf)
 
-        for i in xrange(1, 3):
+        for i in range(1, 3):
             client.insert('k1', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("value1", _i64(100), 0), ConsistencyLevel.ONE)
             client.insert('k1', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("col1", _i64(200), 0), ConsistencyLevel.ONE)
             client.insert('k1', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("col2", _i64(300), 0), ConsistencyLevel.ONE)
@@ -401,7 +403,7 @@ class TestThrift(UpgradeTester):
         _validate_sparse_cql(cursor, cf='sparse_super_2')
 
         for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory, use_thrift=True):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             client = get_thrift_client(host, port)
             _validate_sparse_thrift(client, cf='sparse_super_2')
             _validate_sparse_cql(cursor, cf='sparse_super_2')
@@ -422,7 +424,9 @@ for spec in specs:
                                                         rf=spec['RF'],
                                                         pathname=spec['UPGRADE_PATH'].name)
     gen_class_name = TestThrift.__name__ + suffix
-    assert_not_in(gen_class_name, globals())
+    assert gen_class_name not in globals()
 
     upgrade_applies_to_env = RUN_STATIC_UPGRADE_MATRIX or spec['UPGRADE_PATH'].upgrade_meta.matches_current_env_version_family
-    globals()[gen_class_name] = skipUnless(upgrade_applies_to_env, 'test not applicable to env.')(type(gen_class_name, (TestThrift,), spec))
+    if not upgrade_applies_to_env:
+        # a bare pytest.mark.skip(...) expression is a no-op; attach the mark to the generated class
+        spec['pytestmark'] = [pytest.mark.skip(reason='test not applicable to env.')]
+    globals()[gen_class_name] = type(gen_class_name, (TestThrift,), spec)
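
The loop above builds one test class per upgrade spec at import time. A bare pytest.mark.skip(...) expression is discarded unless it is attached to something, which is why the mark goes into the class namespace as pytestmark. A minimal self-contained sketch of the pattern, with illustrative class and spec names rather than the dtest ones:

    import pytest

    class Base:
        SPEC = None

    for name, applicable in [('TestPathA', True), ('TestPathB', False)]:
        namespace = {'SPEC': name}
        if not applicable:
            # pytest collects a class-level 'pytestmark' list and applies each mark
            namespace['pytestmark'] = [pytest.mark.skip(reason='not applicable to env.')]
        globals()[name] = type(name, (Base,), namespace)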

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/upgrade_tests/upgrade_base.py
----------------------------------------------------------------------
diff --git a/upgrade_tests/upgrade_base.py b/upgrade_tests/upgrade_base.py
index 484c4bf..cf5d46f 100644
--- a/upgrade_tests/upgrade_base.py
+++ b/upgrade_tests/upgrade_base.py
@@ -1,13 +1,17 @@
 import os
 import sys
 import time
+import pytest
+import logging
+
 from abc import ABCMeta
-from unittest import skipIf
 
 from ccmlib.common import get_version_from_build, is_win
 from tools.jmxutils import remove_perf_disable_shared_mem
 
-from dtest import CASSANDRA_VERSION_FROM_BUILD, TRACE, DEBUG, Tester, debug, create_ks
+from dtest import CASSANDRA_VERSION_FROM_BUILD, Tester, create_ks
+
+logger = logging.getLogger(__name__)
 
 
 def switch_jdks(major_version_int):
@@ -25,12 +29,13 @@ def switch_jdks(major_version_int):
     # don't change if the same version was requested
     current_java_home = os.environ.get('JAVA_HOME')
     if current_java_home != os.environ[new_java_home]:
-        debug("Switching jdk to version {} (JAVA_HOME is changing from {} to {})".format(major_version_int, current_java_home or 'undefined', os.environ[new_java_home]))
+        logger.debug("Switching jdk to version {} (JAVA_HOME is changing from {} to {})".format(major_version_int, current_java_home or 'undefined', os.environ[new_java_home]))
         os.environ['JAVA_HOME'] = os.environ[new_java_home]
 
 
-@skipIf(sys.platform == 'win32', 'Skip upgrade tests on Windows')
-class UpgradeTester(Tester):
+@pytest.mark.upgrade_test
+@pytest.mark.skipif(sys.platform == 'win32', reason='Skip upgrade tests on Windows')
+class UpgradeTester(Tester, metaclass=ABCMeta):
     """
     When run in 'normal' upgrade mode without specifying any version to run,
     this will test different upgrade paths depending on what version of C* you
@@ -38,35 +43,27 @@ class UpgradeTester(Tester):
     When run on 3.0, this will test the upgrade path to trunk. When run on
     versions above 3.0, this will test the upgrade path from 3.0 to HEAD.
     """
-    # make this an abc so we can get all subclasses with __subclasses__()
-    __metaclass__ = ABCMeta
     NODES, RF, __test__, CL, UPGRADE_PATH = 2, 1, False, None, None
 
-    # known non-critical bug during teardown:
-    # https://issues.apache.org/jira/browse/CASSANDRA-12340
-    if CASSANDRA_VERSION_FROM_BUILD < '2.2':
-        _known_teardown_race_error = (
-            'ScheduledThreadPoolExecutor$ScheduledFutureTask@[0-9a-f]+ '
-            'rejected from org.apache.cassandra.concurrent.DebuggableScheduledThreadPoolExecutor'
-        )
-        # don't alter ignore_log_patterns on the class, just the obj for this test
-        ignore_log_patterns = [_known_teardown_race_error]
-
-    def __init__(self, *args, **kwargs):
-        try:
-            self.ignore_log_patterns
-        except AttributeError:
-            self.ignore_log_patterns = []
-
-        self.ignore_log_patterns = self.ignore_log_patterns[:] + [
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        # known non-critical bug during teardown:
+        # https://issues.apache.org/jira/browse/CASSANDRA-12340
+        if CASSANDRA_VERSION_FROM_BUILD < '2.2':
+            _known_teardown_race_error = (
+                'ScheduledThreadPoolExecutor$ScheduledFutureTask@[0-9a-f]+ '
+                'rejected from org.apache.cassandra.concurrent.DebuggableScheduledThreadPoolExecutor'
+            )
+            fixture_dtest_setup.ignore_log_patterns = fixture_dtest_setup.ignore_log_patterns \
+                                                      + [_known_teardown_race_error]
+
+        fixture_dtest_setup.ignore_log_patterns = fixture_dtest_setup.ignore_log_patterns + [
             r'RejectedExecutionException.*ThreadPoolExecutor has shut down',  # see  CASSANDRA-12364
         ]
-        self.enable_for_jolokia = False
-        super(UpgradeTester, self).__init__(*args, **kwargs)
 
     def setUp(self):
         self.validate_class_config()
-        debug("Upgrade test beginning, setting CASSANDRA_VERSION to {}, and jdk to {}. (Prior values will be restored after test)."
+        logger.debug("Upgrade test beginning, setting CASSANDRA_VERSION to {}, and jdk to {}. (Prior values will be restored after test)."
               .format(self.UPGRADE_PATH.starting_version, self.UPGRADE_PATH.starting_meta.java_version))
         switch_jdks(self.UPGRADE_PATH.starting_meta.java_version)
         os.environ['CASSANDRA_VERSION'] = self.UPGRADE_PATH.starting_version
@@ -80,7 +77,7 @@ class UpgradeTester(Tester):
         cl = self.CL if cl is None else cl
         self.CL = cl  # store for later use in do_upgrade
 
-        self.assertGreaterEqual(nodes, 2, "backwards compatibility tests require at least two nodes")
+        assert nodes >= 2, "backwards compatibility tests require at least two nodes"
 
         self.protocol_version = protocol_version
 
@@ -104,8 +101,8 @@ class UpgradeTester(Tester):
         cluster.populate(nodes)
         node1 = cluster.nodelist()[0]
         cluster.set_install_dir(version=self.UPGRADE_PATH.starting_version)
-        self.enable_for_jolokia = kwargs.pop('jolokia', False)
-        if self.enable_for_jolokia:
+        self.fixture_dtest_setup.enable_for_jolokia = kwargs.pop('jolokia', False)
+        if self.fixture_dtest_setup.enable_for_jolokia:
             remove_perf_disable_shared_mem(node1)
 
         cluster.start(wait_for_binary_proto=True)
@@ -147,7 +144,7 @@ class UpgradeTester(Tester):
         if is_win() and self.cluster.version() <= '2.2':
             node1.mark_log_for_errors()
 
-        debug('upgrading node1 to {}'.format(self.UPGRADE_PATH.upgrade_version))
+        logger.debug('upgrading node1 to {}'.format(self.UPGRADE_PATH.upgrade_version))
         switch_jdks(self.UPGRADE_PATH.upgrade_meta.java_version)
 
         node1.set_install_dir(version=self.UPGRADE_PATH.upgrade_version)
@@ -159,18 +156,19 @@ class UpgradeTester(Tester):
         # The since decorator can only check the starting version of the upgrade,
         # so here we check the new version of the upgrade as well.
         if hasattr(self, 'max_version') and self.max_version is not None and new_version_from_build >= self.max_version:
-            self.skip("Skipping test, new version {} is equal to or higher than max version {}".format(new_version_from_build, self.max_version))
+            pytest.skip("Skipping test, new version {} is equal to or higher than "
+                        "max version {}".format(new_version_from_build, self.max_version))
 
         if (new_version_from_build >= '3' and self.protocol_version is not None and self.protocol_version < 3):
-            self.skip('Protocol version {} incompatible '
-                      'with Cassandra version {}'.format(self.protocol_version, new_version_from_build))
-        node1.set_log_level("DEBUG" if DEBUG else "TRACE" if TRACE else "INFO")
+            pytest.skip('Protocol version {} incompatible '
+                        'with Cassandra version {}'.format(self.protocol_version, new_version_from_build))
+        node1.set_log_level(logging.getLevelName(logging.root.level))
         node1.set_configuration_options(values={'internode_compression': 'none'})
 
         if use_thrift:
             node1.set_configuration_options(values={'start_rpc': 'true'})
 
-        if self.enable_for_jolokia:
+        if self.fixture_dtest_setup.enable_for_jolokia:
             remove_perf_disable_shared_mem(node1)
 
         node1.start(wait_for_binary_proto=True, wait_other_notice=True)
@@ -223,7 +221,7 @@ class UpgradeTester(Tester):
         Used in places where is_upgraded was used to determine if the node version was >=2.2.
         """
         node_versions = self.get_node_versions()
-        self.assertLessEqual(len({v.vstring for v in node_versions}), 2)
+        assert len({v.vstring for v in node_versions}) <= 2
         return max(node_versions) if is_upgraded else min(node_versions)
 
     def tearDown(self):
@@ -246,4 +244,4 @@ class UpgradeTester(Tester):
              if subclasses else
              '')
         )
-        self.assertIsNotNone(self.UPGRADE_PATH, no_upgrade_path_error)
+        assert self.UPGRADE_PATH is not None, no_upgrade_path_error
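
The diff above also replaces the old __init__ override with an autouse fixture that extends the per-test setup object instead of mutating class state. A minimal sketch of the autouse-fixture pattern; apart from the fixture_dtest_setup name, which this commit introduces, everything here is illustrative:

    import pytest

    class Setup:
        def __init__(self):
            self.ignore_log_patterns = []

    @pytest.fixture
    def fixture_dtest_setup():
        return Setup()

    class TestExample:
        @pytest.fixture(autouse=True)
        def add_ignore_patterns(self, fixture_dtest_setup):
            # runs before every test in the class; extend rather than replace the list
            fixture_dtest_setup.ignore_log_patterns = \
                fixture_dtest_setup.ignore_log_patterns + [r'harmless teardown noise']

        def test_pattern_present(self, fixture_dtest_setup):
            assert r'harmless teardown noise' in fixture_dtest_setup.ignore_log_patterns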

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/upgrade_tests/upgrade_compact_storage.py
----------------------------------------------------------------------
diff --git a/upgrade_tests/upgrade_compact_storage.py b/upgrade_tests/upgrade_compact_storage.py
index 085d3a3..ed85515 100644
--- a/upgrade_tests/upgrade_compact_storage.py
+++ b/upgrade_tests/upgrade_compact_storage.py
@@ -1,28 +1,30 @@
-# coding: utf-8
-
 import time
+import pytest
+import logging
 
 from cassandra.query import dict_factory
-from nose.tools import assert_equal, assert_true
 from ccmlib.node import NodeError
 
-from dtest import Tester, debug
+from dtest import Tester
 from cassandra.protocol import ConfigurationException
-from tools.decorators import since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 VERSION_311 = 'github:apache/cassandra-3.11'
 VERSION_TRUNK = 'github:apache/trunk'
 
 
+@pytest.mark.upgrade_test
 @since('4.0')
-class UpgradeSuperColumnsThrough(Tester):
+class TestUpgradeSuperColumnsThrough(Tester):
     def upgrade_to_version(self, tag, start_rpc=True, wait=True, nodes=None):
-        debug('Upgrading to ' + tag)
+        logger.debug('Upgrading to ' + tag)
         if nodes is None:
             nodes = self.cluster.nodelist()
 
         for node in nodes:
-            debug('Shutting down node: ' + node.name)
+            logger.debug('Shutting down node: ' + node.name)
             node.drain()
             node.watch_log_for("DRAINED")
             node.stop(wait_other_notice=False)
@@ -30,12 +32,12 @@ class UpgradeSuperColumnsThrough(Tester):
         # Update Cassandra Directory
         for node in nodes:
             node.set_install_dir(version=tag)
-            debug("Set new cassandra dir for %s: %s" % (node.name, node.get_install_dir()))
+            logger.debug("Set new cassandra dir for %s: %s" % (node.name, node.get_install_dir()))
         self.cluster.set_install_dir(version=tag)
 
         # Restart nodes on new version
         for node in nodes:
-            debug('Starting %s on new version (%s)' % (node.name, tag))
+            logger.debug('Starting %s on new version (%s)' % (node.name, tag))
             node.start(wait_other_notice=wait, wait_for_binary_proto=wait)
 
     def prepare(self, num_nodes=1, cassandra_version="github:apache/cassandra-2.2"):
@@ -49,7 +51,7 @@ class UpgradeSuperColumnsThrough(Tester):
         cluster.start()
         return cluster
 
-    def upgrade_compact_storage_test(self):
+    def test_upgrade_compact_storage(self):
         cluster = self.prepare(cassandra_version='github:apache/cassandra-3.0')
         node = self.cluster.nodelist()[0]
         session = self.patient_cql_connection(node, row_factory=dict_factory)
@@ -57,18 +59,18 @@ class UpgradeSuperColumnsThrough(Tester):
         session.execute("CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy','replication_factor': '1' };")
         session.execute("CREATE TABLE ks.compact_table (pk int PRIMARY KEY, col1 int, col2 int) WITH COMPACT STORAGE")
 
-        for i in xrange(1, 5):
+        for i in range(1, 5):
             session.execute("INSERT INTO ks.compact_table (pk, col1, col2) VALUES ({i}, {i}, {i})".format(i=i))
 
         self.upgrade_to_version(VERSION_TRUNK, wait=False)
-        self.allow_log_errors = True
+        self.fixture_dtest_setup.allow_log_errors = True
 
         time.sleep(5)
         # After restart, it won't start
         errors = len(node.grep_log("Compact Tables are not allowed in Cassandra starting with 4.0 version"))
-        assert_true(errors > 0)
+        assert errors > 0
 
-    def mixed_cluster_test(self):
+    def test_mixed_cluster(self):
         cluster = self.prepare(num_nodes=2, cassandra_version=VERSION_311)
         node1, node2 = self.cluster.nodelist()
 
@@ -88,9 +90,9 @@ class UpgradeSuperColumnsThrough(Tester):
         except ConfigurationException:
             thrown = True
 
-        assert_true(thrown)
+        assert thrown
 
-    def upgrade_with_dropped_compact_storage_test(self):
+    def test_upgrade_with_dropped_compact_storage(self):
         cluster = self.prepare(cassandra_version=VERSION_311)
         node = self.cluster.nodelist()[0]
         session = self.patient_cql_connection(node, row_factory=dict_factory)
@@ -98,7 +100,7 @@ class UpgradeSuperColumnsThrough(Tester):
         session.execute("CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy','replication_factor': '1' };")
         session.execute("CREATE TABLE ks.compact_table (pk int PRIMARY KEY, col1 int, col2 int) WITH COMPACT STORAGE")
 
-        for i in xrange(1, 5):
+        for i in range(1, 5):
             session.execute("INSERT INTO ks.compact_table (pk, col1, col2) VALUES ({i}, {i}, {i})".format(i=i))
 
         session.execute("ALTER TABLE ks.compact_table DROP COMPACT STORAGE")
@@ -106,10 +108,10 @@ class UpgradeSuperColumnsThrough(Tester):
         self.upgrade_to_version(VERSION_TRUNK, wait=True)
 
         session = self.patient_cql_connection(node, row_factory=dict_factory)
-        assert_equal(list(session.execute("SELECT * FROM ks.compact_table WHERE pk = 1")),
-                     [{u'col2': 1, u'pk': 1, u'column1': None, u'value': None, u'col1': 1}])
+        assert (list(session.execute("SELECT * FROM ks.compact_table WHERE pk = 1")) ==
+                     [{'col2': 1, 'pk': 1, 'column1': None, 'value': None, 'col1': 1}])
 
-    def force_readd_compact_storage_test(self):
+    def test_force_readd_compact_storage(self):
         cluster = self.prepare(cassandra_version=VERSION_311)
         node = self.cluster.nodelist()[0]
         session = self.patient_cql_connection(node, row_factory=dict_factory)
@@ -117,7 +119,7 @@ class UpgradeSuperColumnsThrough(Tester):
         session.execute("CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy','replication_factor': '1' };")
         session.execute("CREATE TABLE ks.compact_table (pk int PRIMARY KEY, col1 int, col2 int) WITH COMPACT STORAGE")
 
-        for i in xrange(1, 5):
+        for i in range(1, 5):
             session.execute("INSERT INTO ks.compact_table (pk, col1, col2) VALUES ({i}, {i}, {i})".format(i=i))
 
         session.execute("ALTER TABLE ks.compact_table DROP COMPACT STORAGE")
@@ -127,23 +129,23 @@ class UpgradeSuperColumnsThrough(Tester):
         session = self.patient_cql_connection(node, row_factory=dict_factory)
         session.execute("update system_schema.tables set flags={} where keyspace_name='ks' and table_name='compact_table';")
 
-        assert_equal(list(session.execute("SELECT * FROM ks.compact_table WHERE pk = 1")),
-                     [{u'col2': 1, u'pk': 1, u'column1': None, u'value': None, u'col1': 1}])
+        assert (list(session.execute("SELECT * FROM ks.compact_table WHERE pk = 1")) ==
+                     [{'col2': 1, 'pk': 1, 'column1': None, 'value': None, 'col1': 1}])
 
-        self.allow_log_errors = True
+        self.fixture_dtest_setup.allow_log_errors = True
 
         node.stop(wait_other_notice=False)
         node.set_install_dir(version=VERSION_TRUNK)
         try:
             node.start(wait_other_notice=False, wait_for_binary_proto=False, verbose=False)
         except (NodeError):
-            print "error"  # ignore
+            print("error")  # ignore
         time.sleep(5)
         # After restart, it won't start
         errors = len(node.grep_log("Compact Tables are not allowed in Cassandra starting with 4.0 version"))
-        assert_true(errors > 0)
+        assert errors > 0
 
-    def upgrade_with_dropped_compact_storage_index_test(self):
+    def test_upgrade_with_dropped_compact_storage_index(self):
         cluster = self.prepare(cassandra_version=VERSION_311)
         node = self.cluster.nodelist()[0]
         session = self.patient_cql_connection(node, row_factory=dict_factory)
@@ -152,25 +154,25 @@ class UpgradeSuperColumnsThrough(Tester):
         session.execute("CREATE TABLE ks.compact_table (pk ascii PRIMARY KEY, col1 ascii) WITH COMPACT STORAGE")
         session.execute("CREATE INDEX ON ks.compact_table(col1)")
 
-        for i in xrange(1, 10):
+        for i in range(1, 10):
             session.execute("INSERT INTO ks.compact_table (pk, col1) VALUES ('{pk}', '{col1}')".format(pk=i, col1=i * 10))
 
-        assert_equal(list(session.execute("SELECT * FROM ks.compact_table WHERE col1 = '50'")),
-                     [{u'pk': '5', u'col1': '50'}])
-        assert_equal(list(session.execute("SELECT * FROM ks.compact_table WHERE pk = '5'")),
-                     [{u'pk': '5', u'col1': '50'}])
+        assert (list(session.execute("SELECT * FROM ks.compact_table WHERE col1 = '50'")) ==
+                     [{'pk': '5', 'col1': '50'}])
+        assert (list(session.execute("SELECT * FROM ks.compact_table WHERE pk = '5'")) ==
+                     [{'pk': '5', 'col1': '50'}])
         session.execute("ALTER TABLE ks.compact_table DROP COMPACT STORAGE")
 
-        assert_equal(list(session.execute("SELECT * FROM ks.compact_table WHERE col1 = '50'")),
-                     [{u'col1': '50', u'column1': None, u'pk': '5', u'value': None}])
-        assert_equal(list(session.execute("SELECT * FROM ks.compact_table WHERE pk = '5'")),
-                     [{u'col1': '50', u'column1': None, u'pk': '5', u'value': None}])
+        assert (list(session.execute("SELECT * FROM ks.compact_table WHERE col1 = '50'")) ==
+                     [{'col1': '50', 'column1': None, 'pk': '5', 'value': None}])
+        assert (list(session.execute("SELECT * FROM ks.compact_table WHERE pk = '5'")) ==
+                     [{'col1': '50', 'column1': None, 'pk': '5', 'value': None}])
 
         self.upgrade_to_version(VERSION_TRUNK, wait=True)
 
         session = self.patient_cql_connection(node, row_factory=dict_factory)
 
-        assert_equal(list(session.execute("SELECT * FROM ks.compact_table WHERE col1 = '50'")),
-                     [{u'col1': '50', u'column1': None, u'pk': '5', u'value': None}])
-        assert_equal(list(session.execute("SELECT * FROM ks.compact_table WHERE pk = '5'")),
-                     [{u'col1': '50', u'column1': None, u'pk': '5', u'value': None}])
+        assert (list(session.execute("SELECT * FROM ks.compact_table WHERE col1 = '50'")) ==
+                     [{'col1': '50', 'column1': None, 'pk': '5', 'value': None}])
+        assert (list(session.execute("SELECT * FROM ks.compact_table WHERE pk = '5'")) ==
+                     [{'col1': '50', 'column1': None, 'pk': '5', 'value': None}])
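
One hunk above keeps the hand-rolled thrown-flag pattern around the expected ConfigurationException; under pytest the same intent is usually expressed with pytest.raises. A minimal sketch under that assumption (the exception class and the failing call are stand-ins, not the driver's):

    import pytest

    class ConfigurationException(Exception):
        """Stand-in for cassandra.protocol.ConfigurationException."""

    def create_compact_table():
        raise ConfigurationException("COMPACT STORAGE is not supported")

    def test_compact_table_rejected():
        # equivalent to the thrown-flag pattern, with a clearer failure message
        with pytest.raises(ConfigurationException):
            create_compact_table()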

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/upgrade_tests/upgrade_manifest.py
----------------------------------------------------------------------
diff --git a/upgrade_tests/upgrade_manifest.py b/upgrade_tests/upgrade_manifest.py
index d5ed776..ce9442f 100644
--- a/upgrade_tests/upgrade_manifest.py
+++ b/upgrade_tests/upgrade_manifest.py
@@ -1,7 +1,11 @@
+import logging
+
 from collections import namedtuple
 
 from dtest import (CASSANDRA_GITREF, CASSANDRA_VERSION_FROM_BUILD,
-                   RUN_STATIC_UPGRADE_MATRIX, debug)
+                   RUN_STATIC_UPGRADE_MATRIX)
+
+logger = logging.getLogger(__name__)
 
 # UpgradePath's contain data about upgrade paths we wish to test
 # They also contain VersionMeta's for each version the path is testing
@@ -156,18 +160,18 @@ def build_upgrade_pairs():
     valid_upgrade_pairs = []
     manifest = OVERRIDE_MANIFEST or MANIFEST
 
-    for origin_meta, destination_metas in manifest.items():
+    for origin_meta, destination_metas in list(manifest.items()):
         for destination_meta in destination_metas:
             if not (origin_meta and destination_meta):  # None means we don't care about that version, which means we don't care about iterations involving it either
-                debug("skipping class creation as a version is undefined (this is normal), versions: {} and {}".format(origin_meta, destination_meta))
+                logger.debug("skipping class creation as a version is undefined (this is normal), versions: {} and {}".format(origin_meta, destination_meta))
                 continue
 
             if not _is_targeted_variant_combo(origin_meta, destination_meta):
-                debug("skipping class creation, no testing of '{}' to '{}' (for {} upgrade to {})".format(origin_meta.variant, destination_meta.variant, origin_meta.name, destination_meta.name))
+                logger.debug("skipping class creation, no testing of '{}' to '{}' (for {} upgrade to {})".format(origin_meta.variant, destination_meta.variant, origin_meta.name, destination_meta.name))
                 continue
 
             if not _have_common_proto(origin_meta, destination_meta):
-                debug("skipping class creation, no compatible protocol version between {} and {}".format(origin_meta.name, destination_meta.name))
+                logger.debug("skipping class creation, no compatible protocol version between {} and {}".format(origin_meta.name, destination_meta.name))
                 continue
 
             path_name = 'Upgrade_' + origin_meta.name + '_To_' + destination_meta.name
@@ -177,7 +181,7 @@ def build_upgrade_pairs():
                     # looks like this test should actually run in the current env, so let's set the final version to match the env exactly
                     oldmeta = destination_meta
                     newmeta = destination_meta.clone_with_local_env_version()
-                    debug("{} appears applicable to current env. Overriding final test version from {} to {}".format(path_name, oldmeta.version, newmeta.version))
+                    logger.debug("{} appears applicable to current env. Overriding final test version from {} to {}".format(path_name, oldmeta.version, newmeta.version))
                     destination_meta = newmeta
 
             valid_upgrade_pairs.append(
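
build_upgrade_pairs() walks the manifest and keeps only origin/destination pairs whose versions are defined, targeted, and protocol-compatible. A minimal sketch of that filtering shape; the field names are assumptions based on this diff, and the real UpgradePath/VersionMeta tuples carry more data:

    from collections import namedtuple

    UpgradePath = namedtuple('UpgradePath', ('name', 'starting_version', 'upgrade_version'))

    def build_pairs(manifest):
        # None means "we don't care about that version in this environment"
        return [UpgradePath('Upgrade_{}_To_{}'.format(origin, dest), origin, dest)
                for origin, dests in manifest.items() if origin
                for dest in dests if dest]

    print(build_pairs({'3.0': ['3.11', 'trunk'], None: ['ignored']}))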

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/upgrade_tests/upgrade_schema_agreement_test.py
----------------------------------------------------------------------
diff --git a/upgrade_tests/upgrade_schema_agreement_test.py b/upgrade_tests/upgrade_schema_agreement_test.py
index 8541411..4c8d942 100644
--- a/upgrade_tests/upgrade_schema_agreement_test.py
+++ b/upgrade_tests/upgrade_schema_agreement_test.py
@@ -1,8 +1,13 @@
 import re
 import time
+import pytest
+import logging
 
 from ccmlib.node import TimeoutError
-from dtest import Tester, debug
+from dtest import Tester
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 class TestSchemaAgreementUpgrade(Tester):
@@ -33,15 +38,14 @@ class TestSchemaAgreementUpgrade(Tester):
     # The number of seconds we wait for schema migration log entries to verify
     migration_check_time = 30
 
-    def __init__(self, *args, **kwargs):
-        self.ignore_log_patterns = [
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        fixture_dtest_setup.ignore_log_patterns = (
             # This one occurs if we do a non-rolling upgrade, the node
             # it's trying to send the migration to hasn't started yet,
             # and when it does, it gets replayed and everything is fine.
             r'Can\'t send migration request: node.*is down',
-        ]
-
-        Tester.__init__(self, *args, **kwargs)
+        )
 
     def _prepare(self, version, num_nodes=3):
         cluster = self.cluster
@@ -65,7 +69,7 @@ class TestSchemaAgreementUpgrade(Tester):
         expressions = [" - [pP]ulling schema from endpoint",
                        " - [Ss]ubmitting migration task",
                        " - [Pp]ulled schema from endpoint"]
-        debug("Inspecting log files of {}...".format([n.name for n in nodes]))
+        logger.debug("Inspecting log files of {}...".format([n.name for n in nodes]))
         all_matchings = ""
         for node in nodes:
             try:
@@ -73,11 +77,11 @@ class TestSchemaAgreementUpgrade(Tester):
                 all_matchings = all_matchings + "\n{}: {}".format(node.name, matchings)
             except TimeoutError:
                 # good
-                debug("  {}: log files don't show schema migration messages (good)".format(node.name))
+                logger.debug("  {}: log files don't show schema migration messages (good)".format(node.name))
         if all_matchings != "":
             msg = "Expected no schema migration log entries, but got:{}".format(all_matchings)
-            debug(msg)  # debug message for the validation test case (3.0 vs 3.11.1)
-            self.fail(msg)
+            logger.debug(msg)  # debug message for the validation test case (3.0 vs 3.11.1)
+            pytest.fail(msg)
 
     def _wait_for_status_normal(self, node, mark):
         # Wait until the node is in state NORMAL (otherwise we can expect
@@ -86,13 +90,13 @@ class TestSchemaAgreementUpgrade(Tester):
                            from_mark=mark, timeout=300, filename='debug.log')
 
     def _bounce_node(self, node):
-        debug("Bouncing {}...".format(node.name))
-        debug("  Stopping...")
+        logger.debug("Bouncing {}...".format(node.name))
+        logger.debug("  Stopping...")
         node.stop(wait_other_notice=False)  # intentionally set to wait_other_notice=False
         mark = node.mark_log(filename='debug.log')
-        debug("  Starting...")
+        logger.debug("  Starting...")
         node.start(wait_other_notice=False)  # intentionally set to wait_other_notice=False
-        debug("  Waiting for status NORMAL...")
+        logger.debug("  Waiting for status NORMAL...")
         self._wait_for_status_normal(node, mark)
 
     def _min_version(self, nodes):
@@ -103,7 +107,7 @@ class TestSchemaAgreementUpgrade(Tester):
         min_version = 99.9
         for node in nodes:
             short_version = node.get_base_cassandra_version()
-            debug("{} is on {} ({})".format(node.name, short_version, node.get_cassandra_version()))
+            logger.debug("{} is on {} ({})".format(node.name, short_version, node.get_cassandra_version()))
             if short_version < min_version:
                 min_version = short_version
         return min_version
@@ -131,29 +135,29 @@ class TestSchemaAgreementUpgrade(Tester):
         """
 
         # prepare the cluster with initial version from the upgrade path
-        debug('Starting upgrade test with {}'.format(upgrade_path[0][1]))
+        logger.debug('Starting upgrade test with {}'.format(upgrade_path[0][1]))
         cluster = self._prepare(version=upgrade_path[0][1])
 
         nodes = self.cluster.nodelist()
 
         # perform _rolling_ upgrades from one version to another
         for (gossip_log_with_product_version, version) in upgrade_path[1:]:
-            debug("")
-            debug("Upgrading cluster to {}".format(version))
+            logger.debug("")
+            logger.debug("Upgrading cluster to {}".format(version))
             cluster.set_install_dir(version=version)
 
             for node in nodes:
                 other_nodes = [n for n in nodes if n != node]
 
-                debug("")
-                debug("Stopping {} for upgrade...".format(node.name))
+                logger.debug("")
+                logger.debug("Stopping {} for upgrade...".format(node.name))
                 # needed to "patch" the config file (especially since 4.0) and get the correct version number
                 node.set_install_dir(version=version)
                 node.stop(wait_other_notice=False)  # intentionally set to wait_other_notice=False
 
                 # remember the logfile-mark when the node was upgraded
                 upgrade_log_mark = node.mark_log(filename='debug.log')
-                debug("Starting upgraded {}...".format(node.name))
+                logger.debug("Starting upgraded {}...".format(node.name))
                 node.start(wait_other_notice=False)  # intentionally set to wait_other_notice=False
 
                 # wait until the upgraded node is in status NORMAL
@@ -161,25 +165,25 @@ class TestSchemaAgreementUpgrade(Tester):
 
                 # If it's a 3.11.2 node, check that the correct schema version is announced
                 min_version = self._min_version(nodes)
-                debug("Minimum version: {}".format(min_version))
+                logger.debug("Minimum version: {}".format(min_version))
                 if gossip_log_with_product_version:
                     # 3.11.2 nodes (and only 3.11.2) indicate whether they announce
                     # a "3.0 compatible" or "real" "3.11" schema version.
                     watch_part = "Gossiping my {} schema version".format("3.0 compatible" if min_version == 3.0 else "3.11")
-                    debug("Inspecting log for '{}'...".format(watch_part))
+                    logger.debug("Inspecting log for '{}'...".format(watch_part))
                     matchings = node.watch_log_for(watch_part, from_mark=upgrade_log_mark, timeout=120, filename='debug.log')
-                    debug("  Found: {}".format(matchings))
+                    logger.debug("  Found: {}".format(matchings))
 
                 # Only log the schema information for debug purposes here. Primarily want to catch the
                 # schema migration race.
                 for n in nodes:
                     out, _, _ = n.nodetool("describecluster")
-                    debug("nodetool describecluster of {}:".format(n.name))
-                    debug(out)
+                    logger.debug("nodetool describecluster of {}:".format(n.name))
+                    logger.debug(out)
 
                 # We expect no schema migrations at this point.
                 self._set_verify_log_mark(other_nodes)
-                debug("  Sleep for {} seconds...".format(self.migration_check_time))
+                logger.debug("  Sleep for {} seconds...".format(self.migration_check_time))
                 time.sleep(self.migration_check_time)
                 self._expect_no_schema_migrations(other_nodes)
 
@@ -188,11 +192,11 @@ class TestSchemaAgreementUpgrade(Tester):
                 # the whole endpoint state to propagate - including the schema version, which, in theory,
                 # should trigger the race.
                 # It is expected, that the _other_ nodes do not try to pull the schema.
-                debug("")
-                debug("Try to trigger schema migration race by bouncing the upgraded node")
+                logger.debug("")
+                logger.debug("Try to trigger schema migration race by bouncing the upgraded node")
                 self._bounce_node(node)
                 self._set_verify_log_mark(other_nodes)
-                debug("  Sleep for {} seconds...".format(self.migration_check_time))
+                logger.debug("  Sleep for {} seconds...".format(self.migration_check_time))
                 time.sleep(self.migration_check_time)
                 self._expect_no_schema_migrations(other_nodes)
 
@@ -201,8 +205,8 @@ class TestSchemaAgreementUpgrade(Tester):
                 # only want to have one schema version.
                 for n in nodes:
                     out, _, _ = n.nodetool("describecluster")
-                    debug("nodetool describecluster of {}:".format(n.name))
-                    debug(out)
+                    logger.debug("nodetool describecluster of {}:".format(n.name))
+                    logger.debug(out)
                     versions = out.split('Schema versions:')[1].strip()
                     num_schemas = len(re.findall('\[.*?\]', versions))
-                    self.assertEqual(num_schemas, 1, "Multiple schema versions detected on {}: {}".format(n.name, out))
+                    assert num_schemas == 1, "Multiple schema versions detected on {}: {}".format(n.name, out)
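
The check above reduces nodetool describecluster output to the number of distinct schema versions by counting the bracketed endpoint lists. A minimal sketch of that parsing step (the sample output is fabricated for illustration; note the raw string so \[ stays a literal bracket):

    import re

    out = '''Schema versions:
        59adb24e-f3cd-3e02-97f0-5b395827453f: [127.0.0.1, 127.0.0.2, 127.0.0.3]
    '''
    versions = out.split('Schema versions:')[1].strip()
    num_schemas = len(re.findall(r'\[.*?\]', versions))
    assert num_schemas == 1, "Multiple schema versions detected: {}".format(out)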

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/upgrade_tests/upgrade_supercolumns_test.py
----------------------------------------------------------------------
diff --git a/upgrade_tests/upgrade_supercolumns_test.py b/upgrade_tests/upgrade_supercolumns_test.py
index 6ba9db1..28bf47c 100644
--- a/upgrade_tests/upgrade_supercolumns_test.py
+++ b/upgrade_tests/upgrade_supercolumns_test.py
@@ -1,12 +1,24 @@
 import os
+import pytest
+import logging
 
-from collections import OrderedDict
-
-from dtest import CASSANDRA_VERSION_FROM_BUILD, Tester, debug
-from pycassa.pool import ConnectionPool
-from pycassa.columnfamily import ColumnFamily
+from dtest import CASSANDRA_VERSION_FROM_BUILD, Tester
+from thrift_test import get_thrift_client
 from tools.assertions import assert_all
 
+from thrift_bindings.thrift010.Cassandra import (CfDef, Column, ColumnDef,
+                                           ColumnOrSuperColumn, ColumnParent,
+                                           ColumnPath, ColumnSlice,
+                                           ConsistencyLevel, CounterColumn,
+                                           Deletion, IndexExpression,
+                                           IndexOperator, IndexType,
+                                           InvalidRequestException, KeyRange,
+                                           KeySlice, KsDef, MultiSliceRequest,
+                                           Mutation, NotFoundException,
+                                           SlicePredicate, SliceRange,
+                                           SuperColumn)
+
+logger = logging.getLogger(__name__)
 
 # Use static supercolumn data to reduce total test time and avoid driver issues connecting to C* 1.2.
 # The data contained in the SSTables is (name, {'attr': {'name': name}}) for the name in NAMES.
@@ -15,28 +27,28 @@ TABLES_PATH = os.path.join("./", "upgrade_tests", "supercolumn-data", "cassandra
 NAMES = ["Alice", "Bob", "Claire", "Dave", "Ed", "Frank", "Grace"]
 
 
+@pytest.mark.upgrade_test
 class TestSCUpgrade(Tester):
     """
     Tests upgrade between a 2.0 cluster with predefined super columns and all other versions. Verifies data with both
     CQL and Thrift.
     """
-
-    def __init__(self, *args, **kwargs):
-        self.ignore_log_patterns = [
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        fixture_dtest_setup.allow_log_errors = True
+        fixture_dtest_setup.ignore_log_patterns = (
             # This one occurs if we do a non-rolling upgrade, the node
             # it's trying to send the migration to hasn't started yet,
             # and when it does, it gets replayed and everything is fine.
             r'Can\'t send migration request: node.*is down',
-        ]
+        )
         if CASSANDRA_VERSION_FROM_BUILD < '2.2':
             _known_teardown_race_error = (
                 'ScheduledThreadPoolExecutor$ScheduledFutureTask@[0-9a-f]+ '
                 'rejected from org.apache.cassandra.concurrent.DebuggableScheduledThreadPoolExecutor'
             )
             # don't alter ignore_log_patterns on the class, just the obj for this test
-            self.ignore_log_patterns += [_known_teardown_race_error]
-
-        Tester.__init__(self, *args, **kwargs)
+            fixture_dtest_setup.ignore_log_patterns += (_known_teardown_race_error,)  # tuple, matching the tuple assigned above
 
     def prepare(self, num_nodes=1, cassandra_version="git:cassandra-2.1"):
         cluster = self.cluster
@@ -54,15 +66,20 @@ class TestSCUpgrade(Tester):
         if self.cluster.version() >= '4':
             return
 
-        pool = ConnectionPool("supcols", pool_size=1)
-        super_col_fam = ColumnFamily(pool, "cols")
+        node = self.cluster.nodelist()[0]
+        host, port = node.network_interfaces['thrift']
+        client = get_thrift_client(host, port)
+        client.transport.open()
+        client.set_keyspace('supcols')
+        p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
         for name in NAMES:
-            super_col_value = super_col_fam.get(name)
-            self.assertEqual(OrderedDict([(('attr', u'name'), name)]), super_col_value)
+            super_col_value = client.get_slice(name, ColumnParent("cols"), p, ConsistencyLevel.ONE)
+            logger.debug("get_slice(%s) returned %s" % (name, super_col_value))
+            assert name == super_col_value[0].column.value
 
     def verify_with_cql(self, session):
         session.execute("USE supcols")
-        expected = [[name, 'attr', u'name', name] for name in ['Grace', 'Claire', 'Dave', 'Frank', 'Ed', 'Bob', 'Alice']]
+        expected = [[name, 'attr', 'name', name] for name in ['Grace', 'Claire', 'Dave', 'Frank', 'Ed', 'Bob', 'Alice']]
         assert_all(session, "SELECT * FROM cols", expected)
 
     def _upgrade_super_columns_through_versions_test(self, upgrade_path):
@@ -118,20 +135,20 @@ class TestSCUpgrade(Tester):
 
         cluster.remove(node=node1)
 
-    def upgrade_super_columns_through_all_versions_test(self):
-        self._upgrade_super_columns_through_versions_test(upgrade_path=['git:cassandra-2.2', 'git:cassandra-3.X',
-                                                                        'git:trunk'])
+    def test_upgrade_super_columns_through_all_versions(self):
+        self._upgrade_super_columns_through_versions_test(upgrade_path=['git:cassandra-2.2', 'git:cassandra-3.0',
+                                                                        'git:cassandra-3.11', 'git:trunk'])
 
-    def upgrade_super_columns_through_limited_versions_test(self):
+    def test_upgrade_super_columns_through_limited_versions(self):
         self._upgrade_super_columns_through_versions_test(upgrade_path=['git:cassandra-3.0', 'git:trunk'])
 
     def upgrade_to_version(self, tag, nodes=None):
-        debug('Upgrading to ' + tag)
+        logger.debug('Upgrading to ' + tag)
         if nodes is None:
             nodes = self.cluster.nodelist()
 
         for node in nodes:
-            debug('Shutting down node: ' + node.name)
+            logger.debug('Shutting down node: ' + node.name)
             node.drain()
             node.watch_log_for("DRAINED")
             node.stop(wait_other_notice=False)
@@ -142,12 +159,12 @@ class TestSCUpgrade(Tester):
             if tag < "2.1":
                 if "memtable_allocation_type" in node.config_options:
                     node.config_options.__delitem__("memtable_allocation_type")
-            debug("Set new cassandra dir for %s: %s" % (node.name, node.get_install_dir()))
+            logger.debug("Set new cassandra dir for %s: %s" % (node.name, node.get_install_dir()))
         self.cluster.set_install_dir(version=tag)
 
         # Restart nodes on new version
         for node in nodes:
-            debug('Starting %s on new version (%s)' % (node.name, tag))
+            logger.debug('Starting %s on new version (%s)' % (node.name, tag))
             # Setup log4j / logback again (necessary moving from 2.0 -> 2.1):
             node.set_log_level("INFO")
             node.start(wait_other_notice=True, wait_for_binary_proto=True)

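A note on the fixture idiom used in TestSCUpgrade above (and repeated throughout this patch): per-class ignore_log_patterns now travels through an autouse pytest fixture instead of __init__, so the state lives on the per-test setup object rather than on the test class. A minimal sketch of the idiom, assuming only the framework-provided fixture_dtest_setup fixture; the class name and pattern are illustrative:

    import pytest

    from dtest import Tester

    class TestExample(Tester):
        @pytest.fixture(autouse=True)
        def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
            # runs automatically before each test in the class; mutating the
            # setup object keeps the patterns scoped to a single test run
            fixture_dtest_setup.ignore_log_patterns = (
                r'Can\'t send migration request: node.*is down',
            )
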
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/upgrade_tests/upgrade_through_versions_test.py
----------------------------------------------------------------------
diff --git a/upgrade_tests/upgrade_through_versions_test.py b/upgrade_tests/upgrade_through_versions_test.py
index a825645..397ea15 100644
--- a/upgrade_tests/upgrade_through_versions_test.py
+++ b/upgrade_tests/upgrade_through_versions_test.py
@@ -5,24 +5,26 @@ import random
 import signal
 import time
 import uuid
+import logging
+import pytest
+import psutil
+
 from collections import defaultdict, namedtuple
 from multiprocessing import Process, Queue
-from Queue import Empty, Full
-from unittest import skipUnless
+from queue import Empty, Full
 
-import psutil
 from cassandra import ConsistencyLevel, WriteTimeout
 from cassandra.query import SimpleStatement
-from nose.plugins.attrib import attr
-from six import print_
 
-from dtest import RUN_STATIC_UPGRADE_MATRIX, Tester, debug
+from dtest import RUN_STATIC_UPGRADE_MATRIX, Tester
 from tools.misc import generate_ssl_stores, new_node
-from upgrade_base import switch_jdks
-from upgrade_manifest import (build_upgrade_pairs, current_2_0_x,
+from .upgrade_base import switch_jdks
+from .upgrade_manifest import (build_upgrade_pairs, current_2_0_x,
                               current_2_1_x, current_2_2_x, current_3_0_x,
                               indev_2_2_x, indev_3_x)
 
+logger = logging.getLogger(__name__)
+
 
 def data_writer(tester, to_verify_queue, verification_done_queue, rewrite_probability=0):
     """
@@ -67,7 +69,7 @@ def data_writer(tester, to_verify_queue, verification_done_queue, rewrite_probab
 
             to_verify_queue.put_nowait((key, val,))
         except Exception:
-            debug("Error in data writer process!")
+            logger.debug("Error in data writer process!")
             to_verify_queue.close()
             raise
 
@@ -107,7 +109,7 @@ def data_checker(tester, to_verify_queue, verification_done_queue):
             time.sleep(0.1)  # let's not eat CPU if the queue is empty
             continue
         except Exception:
-            debug("Error in data verifier process!")
+            logger.debug("Error in data verifier process!")
             verification_done_queue.close()
             raise
         else:
@@ -165,7 +167,7 @@ def counter_incrementer(tester, to_verify_queue, verification_done_queue, rewrit
 
             to_verify_queue.put_nowait((key, count + 1,))
         except Exception:
-            debug("Error in counter incrementer process!")
+            logger.debug("Error in counter incrementer process!")
             to_verify_queue.close()
             raise
 
@@ -205,7 +207,7 @@ def counter_checker(tester, to_verify_queue, verification_done_queue):
             time.sleep(0.1)  # let's not eat CPU if the queue is empty
             continue
         except Exception:
-            debug("Error in counter verifier process!")
+            logger.debug("Error in counter verifier process!")
             verification_done_queue.close()
             raise
         else:
@@ -221,70 +223,69 @@ def counter_checker(tester, to_verify_queue, verification_done_queue):
                 pass
 
 
-@attr("resource-intensive")
-class UpgradeTester(Tester):
+@pytest.mark.upgrade_test
+@pytest.mark.resource_intensive
+class TestUpgrade(Tester):
     """
     Upgrades a 3-node Murmur3Partitioner cluster through versions specified in test_version_metas.
     """
     test_version_metas = None  # set on init to know which versions to use
     subprocs = None  # holds any subprocesses, for status checking and cleanup
     extra_config = None  # holds a non-mutable structure that can be cast as dict()
-    __test__ = False  # this is a base class only
-    ignore_log_patterns = (
-        # This one occurs if we do a non-rolling upgrade, the node
-        # it's trying to send the migration to hasn't started yet,
-        # and when it does, it gets replayed and everything is fine.
-        r'Can\'t send migration request: node.*is down',
-        r'RejectedExecutionException.*ThreadPoolExecutor has shut down',
-        # Occurs due to test/ccm writing topo on down nodes
-        r'Cannot update data center or rack from.*for live host',
-        # Normal occurance. See CASSANDRA-12026. Likely won't be needed after C* 4.0.
-        r'Unknown column cdc during deserialization',
-    )
 
-    def __init__(self, *args, **kwargs):
-        self.subprocs = []
-        Tester.__init__(self, *args, **kwargs)
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        fixture_dtest_setup.ignore_log_patterns = (
+            # This one occurs if we do a non-rolling upgrade, the node
+            # it's trying to send the migration to hasn't started yet,
+            # and when it does, it gets replayed and everything is fine.
+            r'Can\'t send migration request: node.*is down',
+            r'RejectedExecutionException.*ThreadPoolExecutor has shut down',
+            # Occurs due to test/ccm writing topo on down nodes
+            r'Cannot update data center or rack from.*for live host',
+            # Normal occurrence. See CASSANDRA-12026. Likely won't be needed after C* 4.0.
+            r'Unknown column cdc during deserialization',
+        )
 
     def setUp(self):
-        debug("Upgrade test beginning, setting CASSANDRA_VERSION to {}, and jdk to {}. (Prior values will be restored after test)."
+        logger.debug("Upgrade test beginning, setting CASSANDRA_VERSION to {}, and jdk to {}. (Prior values will be restored after test)."
               .format(self.test_version_metas[0].version, self.test_version_metas[0].java_version))
         os.environ['CASSANDRA_VERSION'] = self.test_version_metas[0].version
         switch_jdks(self.test_version_metas[0].java_version)
 
-        super(UpgradeTester, self).setUp()
-        debug("Versions to test (%s): %s" % (type(self), str([v.version for v in self.test_version_metas])))
+        super(TestUpgrade, self).setUp()
+        logger.debug("Versions to test (%s): %s" % (type(self), str([v.version for v in self.test_version_metas])))
 
     def init_config(self):
         Tester.init_config(self)
 
         if self.extra_config is not None:
-            debug("Setting extra configuration options:\n{}".format(
+            logger.debug("Setting extra configuration options:\n{}".format(
                 pprint.pformat(dict(self.extra_config), indent=4))
             )
             self.cluster.set_configuration_options(
                 values=dict(self.extra_config)
             )
 
-    def parallel_upgrade_test(self):
+    def test_parallel_upgrade(self):
         """
         Test upgrading cluster all at once (requires cluster downtime).
         """
         self.upgrade_scenario()
 
-    def rolling_upgrade_test(self):
+    def test_rolling_upgrade(self):
         """
         Test rolling upgrade of the cluster, so we have mixed versions part way through.
         """
         self.upgrade_scenario(rolling=True)
 
-    def parallel_upgrade_with_internode_ssl_test(self):
+    def test_parallel_upgrade_with_internode_ssl(self):
         """
         Test upgrading cluster all at once (requires cluster downtime), with internode ssl.
         """
         self.upgrade_scenario(internode_ssl=True)
 
-    def rolling_upgrade_with_internode_ssl_test(self):
+    def test_rolling_upgrade_with_internode_ssl(self):
         """
         Rolling upgrade test using internode ssl.
         """
@@ -301,17 +302,17 @@ class UpgradeTester(Tester):
             cluster.set_configuration_options({'enable_user_defined_functions': 'true'})
 
         if internode_ssl:
-            debug("***using internode ssl***")
-            generate_ssl_stores(self.test_path)
-            self.cluster.enable_internode_ssl(self.test_path)
+            logger.debug("***using internode ssl***")
+            generate_ssl_stores(self.fixture_dtest_setup.test_path)
+            self.cluster.enable_internode_ssl(self.fixture_dtest_setup.test_path)
 
         if populate:
             # Start with 3 node cluster
-            debug('Creating cluster (%s)' % self.test_version_metas[0].version)
+            logger.debug('Creating cluster (%s)' % self.test_version_metas[0].version)
             cluster.populate(3)
             [node.start(use_jna=True, wait_for_binary_proto=True) for node in cluster.nodelist()]
         else:
-            debug("Skipping cluster creation (should already be built)")
+            logger.debug("Skipping cluster creation (should already be built)")
 
         # add nodes to self for convenience
         for i, node in enumerate(cluster.nodelist(), 1):
@@ -324,7 +325,7 @@ class UpgradeTester(Tester):
             else:
                 self._create_schema()
         else:
-            debug("Skipping schema creation (should already be built)")
+            logger.debug("Skipping schema creation (should already be built)")
         time.sleep(5)  # sigh...
 
         self._log_current_ver(self.test_version_metas[0])
@@ -344,8 +345,8 @@ class UpgradeTester(Tester):
 
                     self.upgrade_to_version(version_meta, partial=True, nodes=(node,), internode_ssl=internode_ssl)
 
-                    self._check_on_subprocs(self.subprocs)
-                    debug('Successfully upgraded %d of %d nodes to %s' %
+                    self._check_on_subprocs(self.fixture_dtest_setup.subprocs)
+                    logger.debug('Successfully upgraded %d of %d nodes to %s' %
                           (num + 1, len(self.cluster.nodelist()), version_meta.version))
 
                 self.cluster.set_install_dir(version=version_meta.version)
@@ -375,7 +376,7 @@ class UpgradeTester(Tester):
         for call in after_upgrade_call:
             call()
 
-            debug('All nodes successfully upgraded to %s' % version_meta.version)
+            logger.debug('All nodes successfully upgraded to %s' % version_meta.version)
             self._log_current_ver(version_meta)
 
         cluster.stop()
@@ -384,7 +385,7 @@ class UpgradeTester(Tester):
         # just to be super sure we get cleaned up
         self._terminate_subprocs()
 
-        super(UpgradeTester, self).tearDown()
+        super(TestUpgrade, self).tearDown()
 
     def _check_on_subprocs(self, subprocs):
         """
@@ -402,12 +403,12 @@ class UpgradeTester(Tester):
             raise RuntimeError(message)
 
     def _terminate_subprocs(self):
-        for s in self.subprocs:
+        for s in self.fixture_dtest_setup.subprocs:
             if s.is_alive():
                 try:
                     psutil.Process(s.pid).kill()  # with fire damnit
                 except Exception:
-                    debug("Error terminating subprocess. There could be a lingering process.")
+                    logger.debug("Error terminating subprocess. There could be a lingering process.")
                     pass
 
     def upgrade_to_version(self, version_meta, partial=False, nodes=None, internode_ssl=False):
@@ -416,21 +417,21 @@ class UpgradeTester(Tester):
         that are specified by *nodes*, otherwise ignore *nodes* specified
         and upgrade all nodes.
         """
-        debug('Upgrading {nodes} to {version}'.format(nodes=[n.name for n in nodes] if nodes is not None else 'all nodes', version=version_meta.version))
+        logger.debug('Upgrading {nodes} to {version}'.format(nodes=[n.name for n in nodes] if nodes is not None else 'all nodes', version=version_meta.version))
         switch_jdks(version_meta.java_version)
-        debug("JAVA_HOME: " + os.environ.get('JAVA_HOME'))
+        logger.debug("JAVA_HOME: " + os.environ.get('JAVA_HOME'))
         if not partial:
             nodes = self.cluster.nodelist()
 
         for node in nodes:
-            debug('Shutting down node: ' + node.name)
+            logger.debug('Shutting down node: ' + node.name)
             node.drain()
             node.watch_log_for("DRAINED")
             node.stop(wait_other_notice=False)
 
         for node in nodes:
             node.set_install_dir(version=version_meta.version)
-            debug("Set new cassandra dir for %s: %s" % (node.name, node.get_install_dir()))
+            logger.debug("Set new cassandra dir for %s: %s" % (node.name, node.get_install_dir()))
             if internode_ssl and version_meta.version >= '4.0':
                 node.set_configuration_options({'server_encryption_options': {'enabled': True, 'enable_legacy_ssl_storage_port': True}})
 
@@ -441,7 +442,7 @@ class UpgradeTester(Tester):
 
         # Restart nodes on new version
         for node in nodes:
-            debug('Starting %s on new version (%s)' % (node.name, version_meta.version))
+            logger.debug('Starting %s on new version (%s)' % (node.name, version_meta.version))
             # Setup log4j / logback again (necessary moving from 2.0 -> 2.1):
             node.set_log_level("INFO")
             node.start(wait_other_notice=240, wait_for_binary_proto=True)
@@ -453,7 +454,7 @@ class UpgradeTester(Tester):
         """
         vers = [m.version for m in self.test_version_metas]
         curr_index = vers.index(current_version_meta.version)
-        debug(
+        logger.debug(
             "Current upgrade path: {}".format(
                 vers[:curr_index] + ['***' + current_version_meta.version + '***'] + vers[curr_index + 1:]))
 
@@ -496,7 +497,7 @@ class UpgradeTester(Tester):
     def _write_values(self, num=100):
         session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
         session.execute("use upgrade")
-        for i in xrange(num):
+        for i in range(num):
             x = len(self.row_values) + 1
             session.execute("UPDATE cf SET v='%d' WHERE k=%d" % (x, x))
             self.row_values.add(x)
@@ -509,8 +510,8 @@ class UpgradeTester(Tester):
                 query = SimpleStatement("SELECT k,v FROM cf WHERE k=%d" % x, consistency_level=consistency_level)
                 result = session.execute(query)
                 k, v = result[0]
-                self.assertEqual(x, k)
-                self.assertEqual(str(x), v)
+                assert x == k
+                assert str(x) == v
 
     def _wait_until_queue_condition(self, label, queue, opfunc, required_len, max_wait_s=600):
         """
@@ -528,14 +529,14 @@ class UpgradeTester(Tester):
             try:
                 qsize = queue.qsize()
             except NotImplementedError:
-                debug("Queue size may not be checkable on Mac OS X. Test will continue without waiting.")
+                logger.debug("Queue size may not be checkable on Mac OS X. Test will continue without waiting.")
                 break
             if opfunc(qsize, required_len):
-                debug("{} queue size ({}) is '{}' to {}. Continuing.".format(label, qsize, opfunc.__name__, required_len))
+                logger.debug("{} queue size ({}) is '{}' to {}. Continuing.".format(label, qsize, opfunc.__name__, required_len))
                 break
 
             if divmod(round(time.time()), 30)[1] == 0:
-                debug("{} queue size is at {}, target is to reach '{}' {}".format(label, qsize, opfunc.__name__, required_len))
+                logger.debug("{} queue size is at {}, target is to reach '{}' {}".format(label, qsize, opfunc.__name__, required_len))
 
             time.sleep(0.1)
             continue
@@ -559,7 +560,7 @@ class UpgradeTester(Tester):
         writer = Process(target=data_writer, args=(self, to_verify_queue, verification_done_queue, 25))
         # daemon subprocesses are killed automagically when the parent process exits
         writer.daemon = True
-        self.subprocs.append(writer)
+        self.fixture_dtest_setup.subprocs.append(writer)
         writer.start()
 
         if wait_for_rowcount > 0:
@@ -568,7 +569,7 @@ class UpgradeTester(Tester):
         verifier = Process(target=data_checker, args=(self, to_verify_queue, verification_done_queue))
         # daemon subprocesses are killed automagically when the parent process exits
         verifier.daemon = True
-        self.subprocs.append(verifier)
+        self.fixture_dtest_setup.subprocs.append(verifier)
         verifier.start()
 
         return writer, verifier, to_verify_queue
@@ -588,7 +589,7 @@ class UpgradeTester(Tester):
         incrementer = Process(target=data_writer, args=(self, to_verify_queue, verification_done_queue, 25))
         # daemon subprocesses are killed automagically when the parent process exits
         incrementer.daemon = True
-        self.subprocs.append(incrementer)
+        self.fixture_dtest_setup.subprocs.append(incrementer)
         incrementer.start()
 
         if wait_for_rowcount > 0:
@@ -597,13 +598,13 @@ class UpgradeTester(Tester):
         count_verifier = Process(target=data_checker, args=(self, to_verify_queue, verification_done_queue))
         # daemon subprocesses are killed automagically when the parent process exits
         count_verifier.daemon = True
-        self.subprocs.append(count_verifier)
+        self.fixture_dtest_setup.subprocs.append(count_verifier)
         count_verifier.start()
 
         return incrementer, count_verifier, to_verify_queue
 
     def _increment_counters(self, opcount=25000):
-        debug("performing {opcount} counter increments".format(opcount=opcount))
+        logger.debug("performing {opcount} counter increments".format(opcount=opcount))
         session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
         session.execute("use upgrade;")
 
@@ -616,7 +617,7 @@ class UpgradeTester(Tester):
         fail_count = 0
 
         for i in range(opcount):
-            key1 = random.choice(self.expected_counts.keys())
+            key1 = random.choice(list(self.expected_counts.keys()))
             key2 = random.randint(1, 10)
             try:
                 query = SimpleStatement(update_counter_query.format(key1=key1, key2=key2), consistency_level=ConsistencyLevel.ALL)
@@ -628,15 +629,15 @@ class UpgradeTester(Tester):
             if fail_count > 100:
                 break
 
-        self.assertLess(fail_count, 100, "Too many counter increment failures")
+        assert fail_count < 100, "Too many counter increment failures"
 
     def _check_counters(self):
-        debug("Checking counter values...")
+        logger.debug("Checking counter values...")
         session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
         session.execute("use upgrade;")
 
-        for key1 in self.expected_counts.keys():
-            for key2 in self.expected_counts[key1].keys():
+        for key1 in list(self.expected_counts.keys()):
+            for key2 in list(self.expected_counts[key1].keys()):
                 expected_value = self.expected_counts[key1][key2]
 
                 query = SimpleStatement("SELECT c from countertable where k1='{key1}' and k2={key2};".format(key1=key1, key2=key2),
@@ -649,10 +650,10 @@ class UpgradeTester(Tester):
                     # counter wasn't found
                     actual_value = None
 
-                self.assertEqual(actual_value, expected_value)
+                assert actual_value == expected_value
 
     def _check_select_count(self, consistency_level=ConsistencyLevel.ALL):
-        debug("Checking SELECT COUNT(*)")
+        logger.debug("Checking SELECT COUNT(*)")
         session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
         session.execute("use upgrade;")
 
@@ -663,7 +664,7 @@ class UpgradeTester(Tester):
 
         if result is not None:
             actual_num_rows = result[0][0]
-            self.assertEqual(actual_num_rows, expected_num_rows, "SELECT COUNT(*) returned %s when expecting %s" % (actual_num_rows, expected_num_rows))
+            assert actual_num_rows == expected_num_rows, "SELECT COUNT(*) returned %s when expecting %s" % (actual_num_rows, expected_num_rows)
         else:
             self.fail("Count query did not return")
 
@@ -678,7 +679,7 @@ class BootstrapMixin(object):
 
     def _bootstrap_new_node(self):
         # Check we can bootstrap a new node on the upgraded cluster:
-        debug("Adding a node to the cluster")
+        logger.debug("Adding a node to the cluster")
         nnode = new_node(self.cluster, remote_debug_port=str(2000 + len(self.cluster.nodes)))
         nnode.start(use_jna=True, wait_other_notice=240, wait_for_binary_proto=True)
         self._write_values()
@@ -688,7 +689,7 @@ class BootstrapMixin(object):
 
     def _bootstrap_new_node_multidc(self):
         # Check we can bootstrap a new node on the upgraded cluster:
-        debug("Adding a node to the cluster")
+        logger.debug("Adding a node to the cluster")
         nnode = new_node(self.cluster, remote_debug_port=str(2000 + len(self.cluster.nodes)), data_center='dc2')
 
         nnode.start(use_jna=True, wait_other_notice=240, wait_for_binary_proto=True)
@@ -697,11 +698,11 @@ class BootstrapMixin(object):
         self._check_values()
         self._check_counters()
 
-    def bootstrap_test(self):
+    def test_bootstrap(self):
         # try and add a new node
         self.upgrade_scenario(after_upgrade_call=(self._bootstrap_new_node,))
 
-    def bootstrap_multidc_test(self):
+    def test_bootstrap_multidc(self):
         # try and add a new node
         # multi dc, 2 nodes in each dc
         cluster = self.cluster
@@ -759,26 +760,26 @@ def create_upgrade_class(clsname, version_metas, protocol_version,
         extra_config = (('partitioner', 'org.apache.cassandra.dht.Murmur3Partitioner'),)
 
     if bootstrap_test:
-        parent_classes = (UpgradeTester, BootstrapMixin)
+        parent_classes = (TestUpgrade, BootstrapMixin)
     else:
-        parent_classes = (UpgradeTester,)
+        parent_classes = (TestUpgrade,)
 
     # short names for debug output
     parent_class_names = [cls.__name__ for cls in parent_classes]
 
-    print_("Creating test class {} ".format(clsname))
-    print_("  for C* versions:\n{} ".format(pprint.pformat(version_metas)))
-    print_("  using protocol: v{}, and parent classes: {}".format(protocol_version, parent_class_names))
-    print_("  to run these tests alone, use `nosetests {}.py:{}`".format(__name__, clsname))
+    print("Creating test class {} ".format(clsname))
+    print("  for C* versions:\n{} ".format(pprint.pformat(version_metas)))
+    print("  using protocol: v{}, and parent classes: {}".format(protocol_version, parent_class_names))
+    print("  to run these tests alone, use `nosetests {}.py:{}`".format(__name__, clsname))
 
     upgrade_applies_to_env = RUN_STATIC_UPGRADE_MATRIX or version_metas[-1].matches_current_env_version_family
-
-    newcls = skipUnless(upgrade_applies_to_env, 'test not applicable to env.')(
-        type(
+    newcls = type(
             clsname,
             parent_classes,
             {'test_version_metas': version_metas, '__test__': True, 'protocol_version': protocol_version, 'extra_config': extra_config}
-        ))
+        )
+    if not upgrade_applies_to_env:
+        # note: a bare pytest.mark.skip(...) builds a decorator but applies
+        # nothing; the mark must be called on the class to register the skip
+        newcls = pytest.mark.skip(reason='test not applicable to env.')(newcls)
 
     if clsname in globals():
         raise RuntimeError("Class by name already exists!")
@@ -837,7 +838,7 @@ for upgrade in MULTI_UPGRADES:
                 # looks like this test should actually run in the current env, so let's set the final version to match the env exactly
                 oldmeta = metas[-1]
                 newmeta = oldmeta.clone_with_local_env_version()
-                debug("{} appears applicable to current env. Overriding final test version from {} to {}".format(upgrade.name, oldmeta.version, newmeta.version))
+                logger.debug("{} appears applicable to current env. Overriding final test version from {} to {}".format(upgrade.name, oldmeta.version, newmeta.version))
                 metas[-1] = newmeta
 
         create_upgrade_class(upgrade.name, [m for m in metas], protocol_version=upgrade.protocol_version, extra_config=upgrade.extra_config)

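One subtlety in create_upgrade_class above: pytest.mark.skip(...) merely builds a MarkDecorator, so it has to be applied to the generated class for the skip to take effect (the nose-era skipUnless wrapped the class directly). A sketch of the working shape, with illustrative names:

    import pytest

    def make_upgrade_class(clsname, applicable):
        newcls = type(clsname, (object,), {'__test__': True})
        if not applicable:
            # calling the MarkDecorator on the class attaches the mark,
            # so collection skips every test the class defines
            newcls = pytest.mark.skip(reason='test not applicable to env.')(newcls)
        return newcls
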
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/user_functions_test.py
----------------------------------------------------------------------
diff --git a/user_functions_test.py b/user_functions_test.py
index 5982f28..7a10233 100644
--- a/user_functions_test.py
+++ b/user_functions_test.py
@@ -1,22 +1,39 @@
 import math
 import time
+import pytest
+import logging
+
 from distutils.version import LooseVersion
 
 from cassandra import FunctionFailure
 
-from dtest import CASSANDRA_VERSION_FROM_BUILD, Tester, debug, create_ks
+from dtest_setup_overrides import DTestSetupOverrides
+
+from dtest import CASSANDRA_VERSION_FROM_BUILD, Tester, create_ks
 from tools.assertions import assert_invalid, assert_none, assert_one
-from tools.decorators import since
 from tools.misc import ImmutableMapping
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
 
 @since('2.2')
 class TestUserFunctions(Tester):
-    if CASSANDRA_VERSION_FROM_BUILD >= '3.0':
-        cluster_options = ImmutableMapping({'enable_user_defined_functions': 'true',
-                                            'enable_scripted_user_defined_functions': 'true'})
-    else:
-        cluster_options = ImmutableMapping({'enable_user_defined_functions': 'true'})
+
+    @pytest.fixture(scope='function', autouse=True)
+    def fixture_dtest_setup_overrides(self):
+        dtest_setup_overrides = DTestSetupOverrides()
+        if CASSANDRA_VERSION_FROM_BUILD >= '3.0':
+            dtest_setup_overrides.cluster_options = ImmutableMapping({'enable_user_defined_functions': 'true',
+                                                'enable_scripted_user_defined_functions': 'true'})
+        else:
+            dtest_setup_overrides.cluster_options = ImmutableMapping({'enable_user_defined_functions': 'true'})
+        return dtest_setup_overrides
+
+    @pytest.fixture(scope='function', autouse=True)
+    def parse_dtest_config(self, parse_dtest_config):
+        return parse_dtest_config
 
     def prepare(self, create_keyspace=True, nodes=1, rf=1):
         cluster = self.cluster
@@ -120,7 +137,7 @@ class TestUserFunctions(Tester):
                        "CREATE FUNCTION bad_sin ( input double ) CALLED ON NULL INPUT RETURNS uuid LANGUAGE java AS 'return Math.sin(input);';",
                        "Type mismatch: cannot convert from double to UUID")
 
-    def udf_overload_test(self):
+    def test_udf_overload(self):
 
         session = self.prepare(nodes=3)
 
@@ -154,7 +171,7 @@ class TestUserFunctions(Tester):
         # should now work - unambiguous
         session.execute("DROP FUNCTION overloaded")
 
-    def udf_scripting_test(self):
+    def test_udf_scripting(self):
         session = self.prepare()
         session.execute("create table nums (key int primary key, val double);")
 
@@ -177,7 +194,7 @@ class TestUserFunctions(Tester):
 
         assert_one(session, "select plustwo(key) from nums where key = 3", [5])
 
-    def default_aggregate_test(self):
+    def test_default_aggregate(self):
         session = self.prepare()
         session.execute("create table nums (key int primary key, val double);")
 
@@ -190,7 +207,7 @@ class TestUserFunctions(Tester):
         assert_one(session, "SELECT avg(val) FROM nums", [5.0])
         assert_one(session, "SELECT count(*) FROM nums", [9])
 
-    def aggregate_udf_test(self):
+    def test_aggregate_udf(self):
         session = self.prepare()
         session.execute("create table nums (key int primary key, val int);")
 
@@ -209,7 +226,7 @@ class TestUserFunctions(Tester):
 
         assert_invalid(session, "create aggregate aggthree(int) sfunc test stype int finalfunc aggtwo")
 
-    def udf_with_udt_test(self):
+    def test_udf_with_udt(self):
         """
         Test UDFs that operate on non-frozen UDTs.
         @jira_ticket CASSANDRA-7423
@@ -225,7 +242,7 @@ class TestUserFunctions(Tester):
             frozen_vals = (True,)
 
         for frozen in frozen_vals:
-            debug("Using {} UDTs".format("frozen" if frozen else "non-frozen"))
+            logger.debug("Using {} UDTs".format("frozen" if frozen else "non-frozen"))
 
             table_name = "tab_frozen" if frozen else "tab"
             column_type = "frozen<test>" if frozen else "test"
@@ -240,7 +257,7 @@ class TestUserFunctions(Tester):
             assert_invalid(session, "drop type test;")
 
     @since('2.2')
-    def udf_with_udt_keyspace_isolation_test(self):
+    def test_udf_with_udt_keyspace_isolation(self):
         """
         Ensure functions dont allow a UDT from another keyspace
         @jira_ticket CASSANDRA-9409
@@ -266,7 +283,7 @@ class TestUserFunctions(Tester):
             "Statement on keyspace user_ks cannot refer to a user type in keyspace ks"
         )
 
-    def aggregate_with_udt_keyspace_isolation_test(self):
+    def test_aggregate_with_udt_keyspace_isolation(self):
         """
         Ensure aggregates dont allow a UDT from another keyspace
         @jira_ticket CASSANDRA-9409

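The recurring "since = pytest.mark.since" assignment in these files replaces the old tools.decorators.since: attribute access on pytest.mark mints a custom mark, and a conftest hook is then expected to translate it into a version-based skip. A rough sketch of how such a hook could consume the mark, assuming the Cassandra version under test is known at collection time (the real conftest plumbing is not part of this patch):

    import pytest
    from distutils.version import LooseVersion

    def pytest_collection_modifyitems(items, config):
        current = LooseVersion('3.11')  # placeholder for the version under test
        for item in items:
            since_marker = item.get_marker('since')  # pytest 3.x marker API
            if since_marker and current < LooseVersion(since_marker.args[0]):
                item.add_marker(pytest.mark.skip(reason='requires a newer Cassandra'))
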



[21/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/read_repair_test.py
----------------------------------------------------------------------
diff --git a/read_repair_test.py b/read_repair_test.py
index 57fbf40..7e8d405 100644
--- a/read_repair_test.py
+++ b/read_repair_test.py
@@ -1,23 +1,28 @@
 import time
+import pytest
+import logging
 
 from cassandra import ConsistencyLevel
 from cassandra.query import SimpleStatement
 
+from dtest import Tester, create_ks
 from tools.assertions import assert_one
-from dtest import PRINT_DEBUG, Tester, debug, create_ks
 from tools.data import rows_to_list
-from tools.decorators import since
+from tools.misc import retry_till_success
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 class TestReadRepair(Tester):
 
-    def setUp(self):
-        Tester.setUp(self)
-        self.cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
-        self.cluster.populate(3).start(wait_for_binary_proto=True)
+    @pytest.fixture(scope='function', autouse=True)
+    def fixture_set_cluster_settings(self, fixture_dtest_setup):
+        fixture_dtest_setup.cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
+        fixture_dtest_setup.cluster.populate(3).start(wait_for_binary_proto=True)
 
     @since('3.0')
-    def alter_rf_and_run_read_repair_test(self):
+    def test_alter_rf_and_run_read_repair(self):
         """
         @jira_ticket CASSANDRA-10655
         @jira_ticket CASSANDRA-10657
@@ -25,90 +30,127 @@ class TestReadRepair(Tester):
         Test that querying only a subset of all the columns in a row doesn't confuse read-repair to avoid
         the problem described in CASSANDRA-10655.
         """
-        self._test_read_repair()
+
+        # session is only used to setup & do schema modification. Actual data queries are done directly on
+        # each node, using an exclusive connection and CL.ONE
+        session = self.patient_cql_connection(self.cluster.nodelist()[0])
+        initial_replica, non_replicas = self.do_initial_setup(session)
+
+        # Execute a query at CL.ALL on one of the nodes which was *not* the initial replica. It should trigger a
+        # read repair and propagate the data to all 3 nodes.
+        # Note: result of the read repair contains only the selected column (a), not all columns
+        logger.debug("Executing 'SELECT a...' on non-initial replica to trigger read repair " + non_replicas[0].name)
+        read_repair_session = self.patient_exclusive_cql_connection(non_replicas[0])
+        assert_one(read_repair_session, "SELECT a FROM alter_rf_test.t1 WHERE k=1", [1], cl=ConsistencyLevel.ALL)
+
+        # The read repair should have repaired the replicas, at least partially (see CASSANDRA-10655)
+        # verify by querying each replica in turn.
+        self.check_data_on_each_replica(expect_fully_repaired=False, initial_replica=initial_replica)
+
+        # Now query again at CL.ALL but this time selecting all columns, which should ensure that 'b' also gets repaired
+        query = "SELECT * FROM alter_rf_test.t1 WHERE k=1"
+        logger.debug("Executing 'SELECT *...' on non-initial replica to trigger read repair " + non_replicas[0].name)
+        assert_one(read_repair_session, query, [1, 1, 1], cl=ConsistencyLevel.ALL)
+
+        # Check each replica individually again now that we expect the data to be fully repaired
+        self.check_data_on_each_replica(expect_fully_repaired=True, initial_replica=initial_replica)
 
     def test_read_repair_chance(self):
         """
         @jira_ticket CASSANDRA-12368
         """
-        self._test_read_repair(cl_all=False)
-
-    def _test_read_repair(self, cl_all=True):
+        # session is only used to setup & do schema modification. Actual data queries are done directly on
+        # each node, using an exclusive connection and CL.ONE
         session = self.patient_cql_connection(self.cluster.nodelist()[0])
+        initial_replica, non_replicas = self.do_initial_setup(session)
+
+        # To ensure read repairs are triggered, set the table property to 100%
+        logger.debug("Setting table read repair chance to 1")
+        session.execute("""ALTER TABLE alter_rf_test.t1 WITH read_repair_chance = 1;""")
+
+        # Execute a query at CL.ONE on one of the nodes which was *not* the initial replica. It should trigger a
+        # read repair because read_repair_chance == 1, and propagate the data to all 3 nodes.
+        # Note: result of the read repair contains only the selected column (a), not all columns, so we won't expect
+        # 'b' to have been fully repaired afterwards.
+        logger.debug("Executing 'SELECT a...' on non-initial replica to trigger read repair " + non_replicas[0].name)
+        read_repair_session = self.patient_exclusive_cql_connection(non_replicas[0])
+        read_repair_session.execute(SimpleStatement("SELECT a FROM alter_rf_test.t1 WHERE k=1",
+                                                    consistency_level=ConsistencyLevel.ONE))
+
+        # Query each replica individually to ensure that read repair was triggered. We should expect that only
+        # the initial replica has data for both the 'a' and 'b' columns. The read repair should only have affected
+        # the selected column, so the other two replicas should only have that data.
+        # Note: we need to temporarily set read_repair_chance to 0 while we perform this check.
+        logger.debug("Setting table read repair chance to 0 while we verify each replica's data")
+        session.execute("""ALTER TABLE alter_rf_test.t1 WITH read_repair_chance = 0;""")
+        # The read repair is run in the background, so we spin while checking that the repair has completed
+        retry_till_success(self.check_data_on_each_replica,
+                           expect_fully_repaired=False,
+                           initial_replica=initial_replica,
+                           timeout=30,
+                           bypassed_exception=NotRepairedException)
+
+        # Re-enable global read repair and perform another query on a non-replica. This time the query selects all
+        # columns so we also expect the value for 'b' to be repaired.
+        logger.debug("Setting table read repair chance to 1")
+        session.execute("""ALTER TABLE alter_rf_test.t1 WITH read_repair_chance = 1;""")
+        logger.debug("Executing 'SELECT *...' on non-initial replica to trigger read repair " + non_replicas[0].name)
+        read_repair_session = self.patient_exclusive_cql_connection(non_replicas[0])
+        read_repair_session.execute(SimpleStatement("SELECT * FROM alter_rf_test.t1 WHERE k=1",
+                                                    consistency_level=ConsistencyLevel.ONE))
+
+        # Query each replica again to ensure that second read repair was triggered. This time, we expect the
+        # data to be fully repaired (both 'a' and 'b' columns) by virtue of the query being 'SELECT *...'
+        # As before, we turn off read repair before doing this check.
+        logger.debug("Setting table read repair chance to 0 while we verify each replica's data")
+        session.execute("""ALTER TABLE alter_rf_test.t1 WITH read_repair_chance = 0;""")
+        retry_till_success(self.check_data_on_each_replica,
+                           expect_fully_repaired=True,
+                           initial_replica=initial_replica,
+                           timeout=30,
+                           bypassed_exception=NotRepairedException)
+
+    def do_initial_setup(self, session):
+        """
+        Create a keyspace with rf=1 and a table containing a single row with 2 non-primary key columns.
+        Insert 1 row, placing the data on a single initial replica. Then, alter the keyspace to rf=3, but don't
+        repair. Tests will execute various reads on the replicas and assert the effects of read repair.
+        :param session: Used to perform the schema setup & insert the data
+        :return: a tuple containing the node which initially acts as the replica, and a list of the other two nodes
+        """
+        # Disable speculative retry to make it clear that we only query additional nodes because of read_repair_chance
         session.execute("""CREATE KEYSPACE alter_rf_test
                            WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};""")
-        session.execute("CREATE TABLE alter_rf_test.t1 (k int PRIMARY KEY, a int, b int);")
+        session.execute("CREATE TABLE alter_rf_test.t1 (k int PRIMARY KEY, a int, b int) WITH speculative_retry='NONE';")
         session.execute("INSERT INTO alter_rf_test.t1 (k, a, b) VALUES (1, 1, 1);")
-        cl_one_stmt = SimpleStatement("SELECT * FROM alter_rf_test.t1 WHERE k=1",
-                                      consistency_level=ConsistencyLevel.ONE)
 
         # identify the initial replica and trigger a flush to ensure reads come from sstables
-        initial_replica, non_replicas = self.identify_initial_placement('alter_rf_test', 't1', 1)
-        debug("At RF=1 replica for data is " + initial_replica.name)
+        initial_replica, non_replicas = self.identify_initial_placement()
+        logger.debug("At RF=1 replica for data is " + initial_replica.name)
         initial_replica.flush()
 
+        # Just some basic validation.
         # At RF=1, it shouldn't matter which node we query, as the actual data should always come from the
         # initial replica when reading at CL ONE
         for n in self.cluster.nodelist():
-            debug("Checking " + n.name)
+            logger.debug("Checking " + n.name)
             session = self.patient_exclusive_cql_connection(n)
             assert_one(session, "SELECT * FROM alter_rf_test.t1 WHERE k=1", [1, 1, 1], cl=ConsistencyLevel.ONE)
 
-        # Alter so RF=n but don't repair, then execute a query which selects only a subset of the columns. Run this at
-        # CL ALL on one of the nodes which doesn't currently have the data, triggering a read repair.
-        # The expectation will be that every replicas will have been repaired for that column (but we make no assumptions
-        # on the other columns).
-        debug("Changing RF from 1 to 3")
+        # Alter so RF=n but don't repair, calling tests will execute queries to exercise read repair,
+        # either at CL.ALL or after setting read_repair_chance to 100%.
+        logger.debug("Changing RF from 1 to 3")
         session.execute("""ALTER KEYSPACE alter_rf_test
                            WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};""")
 
-        if not cl_all:
-            debug("Setting table read repair chance to 1")
-            session.execute("""ALTER TABLE alter_rf_test.t1 WITH read_repair_chance = 1;""")
-
-        cl = ConsistencyLevel.ALL if cl_all else ConsistencyLevel.ONE
-
-        debug("Executing SELECT on non-initial replica to trigger read repair " + non_replicas[0].name)
-        read_repair_session = self.patient_exclusive_cql_connection(non_replicas[0])
-
-        if cl_all:
-            # result of the read repair query at cl=ALL contains only the selected column
-            assert_one(read_repair_session, "SELECT a FROM alter_rf_test.t1 WHERE k=1", [1], cl=cl)
-        else:
-            # With background read repair at CL=ONE, result may or may not be correct
-            stmt = SimpleStatement("SELECT a FROM alter_rf_test.t1 WHERE k=1", consistency_level=cl)
-            session.execute(stmt)
-
-        # Check the results of the read repair by querying each replica again at CL ONE
-        debug("Re-running SELECTs at CL ONE to verify read repair")
-        for n in self.cluster.nodelist():
-            debug("Checking " + n.name)
-            session = self.patient_exclusive_cql_connection(n)
-            res = rows_to_list(session.execute(cl_one_stmt))
-            # Column a must be 1 everywhere, and column b must be either 1 or None everywhere
-            self.assertIn(res[0][:2], [[1, 1], [1, None]])
-
-        # Now query selecting all columns
-        query = "SELECT * FROM alter_rf_test.t1 WHERE k=1"
-        debug("Executing SELECT on non-initial replica to trigger read repair " + non_replicas[0].name)
-        read_repair_session = self.patient_exclusive_cql_connection(non_replicas[0])
-
-        if cl_all:
-            # result of the read repair query at cl=ALL should contain all columns
-            assert_one(session, query, [1, 1, 1], cl=cl)
-        else:
-            # With background read repair at CL=ONE, result may or may not be correct
-            stmt = SimpleStatement(query, consistency_level=cl)
-            session.execute(stmt)
-
-        # Check all replica is fully up to date
-        debug("Re-running SELECTs at CL ONE to verify read repair")
-        for n in self.cluster.nodelist():
-            debug("Checking " + n.name)
-            session = self.patient_exclusive_cql_connection(n)
-            assert_one(session, query, [1, 1, 1], cl=ConsistencyLevel.ONE)
+        return initial_replica, non_replicas
 
-    def identify_initial_placement(self, keyspace, table, key):
+    def identify_initial_placement(self):
+        """
+        Identify which node in the 3 node cluster contains the specific key at the point that the test keyspace has
+        rf=1.
+        :return: tuple containing the initial replica, plus a list of the other 2 replicas.
+        """
         nodes = self.cluster.nodelist()
         out, _, _ = nodes[0].nodetool("getendpoints alter_rf_test t1 1")
         address = out.split('\n')[-2]
@@ -120,12 +162,31 @@ class TestReadRepair(Tester):
             else:
                 non_replicas.append(node)
 
-        self.assertIsNotNone(initial_replica, "Couldn't identify initial replica")
+        assert initial_replica is not None, "Couldn't identify initial replica"
 
         return initial_replica, non_replicas
 
+    def check_data_on_each_replica(self, expect_fully_repaired, initial_replica):
+        """
+        Perform a SELECT * query at CL.ONE on each replica in turn. If expect_fully_repaired is True, we verify that
+        each replica returns the full row being queried. If not, then we only verify that the 'a' column has been
+        repaired.
+        """
+        stmt = SimpleStatement("SELECT * FROM alter_rf_test.t1 WHERE k=1", consistency_level=ConsistencyLevel.ONE)
+        logger.debug("Checking all if read repair has completed on all replicas")
+        for n in self.cluster.nodelist():
+            logger.debug("Checking {n}, {x}expecting all columns"
+                         .format(n=n.name, x="" if expect_fully_repaired or n == initial_replica else "not "))
+            session = self.patient_exclusive_cql_connection(n)
+            res = rows_to_list(session.execute(stmt))
+            logger.debug("Actual result: " + str(res))
+            expected = [[1, 1, 1]] if expect_fully_repaired or n == initial_replica else [[1, 1, None]]
+            if res != expected:
+                raise NotRepairedException()
+
     @since('2.0')
-    def range_slice_query_with_tombstones_test(self):
+    def test_range_slice_query_with_tombstones(self):
         """
         @jira_ticket CASSANDRA-8989
         @jira_ticket CASSANDRA-9502
@@ -174,9 +235,9 @@ class TestReadRepair(Tester):
         for trace_event in trace.events:
             # Step 1, find coordinator node:
             activity = trace_event.description
-            self.assertNotIn("Appending to commitlog", activity)
-            self.assertNotIn("Adding to cf memtable", activity)
-            self.assertNotIn("Acquiring switchLock read lock", activity)
+            assert "Appending to commitlog" not in activity
+            assert "Adding to cf memtable" not in activity
+            assert "Acquiring switchLock read lock" not in activity
 
     @since('3.0')
     def test_gcable_tombstone_resurrection_on_range_slice_query(self):
@@ -221,12 +282,21 @@ class TestReadRepair(Tester):
         self.pprint_trace(trace)
         for trace_event in trace.events:
             activity = trace_event.description
-            self.assertNotIn("Sending READ_REPAIR message", activity)
+            assert "Sending READ_REPAIR message" not in activity
 
     def pprint_trace(self, trace):
         """Pretty print a trace"""
-        if PRINT_DEBUG:
-            print("-" * 40)
+        if logging.root.level == logging.DEBUG:
+            print(("-" * 40))
             for t in trace.events:
-                print("%s\t%s\t%s\t%s" % (t.source, t.source_elapsed, t.description, t.thread_name))
-            print("-" * 40)
+                print("%s\t%s\t%s\t%s" % (t.source, t.source_elapsed, t.description, t.thread_name))
+            print("-" * 40)
+
+
+class NotRepairedException(Exception):
+    """
+    Raised to indicate that the data on a replica doesn't match what we'd expect if a
+    specific read repair has run. See check_data_on_each_replica.
+    """
+    pass
+

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/rebuild_test.py
----------------------------------------------------------------------
diff --git a/rebuild_test.py b/rebuild_test.py
index 795c945..919bd54 100644
--- a/rebuild_test.py
+++ b/rebuild_test.py
@@ -1,26 +1,36 @@
+import pytest
 import time
+import logging
+
+from flaky import flaky
+
 from threading import Thread
 
 from cassandra import ConsistencyLevel
 from ccmlib.node import ToolError
 
-from dtest import Tester, debug, create_ks, create_cf
+from dtest import Tester, create_ks, create_cf
 from tools.data import insert_c1c2, query_c1c2
-from tools.decorators import since, no_vnodes
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 class TestRebuild(Tester):
-    ignore_log_patterns = (
-        # This one occurs when trying to send the migration to a
-        # node that hasn't started yet, and when it does, it gets
-        # replayed and everything is fine.
-        r'Can\'t send migration request: node.*is down',
-        # ignore streaming error during bootstrap
-        r'Exception encountered during startup',
-        r'Streaming error occurred'
-    )
-
-    def simple_rebuild_test(self):
+
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        fixture_dtest_setup.ignore_log_patterns = (
+            # This one occurs when trying to send the migration to a
+            # node that hasn't started yet, and when it does, it gets
+            # replayed and everything is fine.
+            r'Can\'t send migration request: node.*is down',
+            # ignore streaming error during bootstrap
+            r'Exception encountered during startup',
+            r'Streaming error occurred'
+        )
+
+    def test_simple_rebuild(self):
         """
         @jira_ticket CASSANDRA-9119
 
@@ -48,7 +58,7 @@ class TestRebuild(Tester):
         insert_c1c2(session, n=keys, consistency=ConsistencyLevel.LOCAL_ONE)
 
         # check data
-        for i in xrange(0, keys):
+        for i in range(0, keys):
             query_c1c2(session, i, ConsistencyLevel.LOCAL_ONE)
         session.shutdown()
 
@@ -118,31 +128,35 @@ class TestRebuild(Tester):
         # manually raise exception from cmd1 thread
         # see http://stackoverflow.com/a/1854263
         if cmd1.thread_exc_info is not None:
-            raise cmd1.thread_exc_info[1], None, cmd1.thread_exc_info[2]
+            raise cmd1.thread_exc_info[1].with_traceback(cmd1.thread_exc_info[2])
 
         # exactly 1 of the two nodetool calls should fail
         # usually it will be the one in the main thread,
         # but occasionally it wins the race with the one in the secondary thread,
         # so we check that one succeeded and the other failed
-        self.assertEqual(self.rebuild_errors, 1,
-                         msg='rebuild errors should be 1, but found {}. Concurrent rebuild should not be allowed, but one rebuild command should have succeeded.'.format(self.rebuild_errors))
+        assert self.rebuild_errors == 1, \
+            'rebuild errors should be 1, but found {}. Concurrent rebuild should not be allowed, but one rebuild command should have succeeded.'.format(self.rebuild_errors)
 
         # check data
-        for i in xrange(0, keys):
+        for i in range(0, keys):
             query_c1c2(session, i, ConsistencyLevel.LOCAL_ONE)
 
+    @flaky
     @since('2.2')
-    def resumable_rebuild_test(self):
+    def test_resumable_rebuild(self):
         """
         @jira_ticket CASSANDRA-10810
 
         Test rebuild operation is resumable
         """
-        self.ignore_log_patterns = list(self.ignore_log_patterns) + [r'Error while rebuilding node',
-                                                                     r'Streaming error occurred on session with peer 127.0.0.3',
-                                                                     r'Remote peer 127.0.0.3 failed stream session',
-                                                                     r'Streaming error occurred on session with peer 127.0.0.3:7000',
-                                                                     r'Remote peer 127.0.0.3:7000 failed stream session']
+        self.fixture_dtest_setup.ignore_log_patterns = list(self.fixture_dtest_setup.ignore_log_patterns) + [
+            r'Error while rebuilding node',
+            r'Streaming error occurred on session with peer 127.0.0.3',
+            r'Remote peer 127.0.0.3 failed stream session',
+            r'Streaming error occurred on session with peer 127.0.0.3:7000',
+            r'Remote peer 127.0.0.3:7000 failed stream session'
+        ]
+
         cluster = self.cluster
         cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
 
@@ -203,32 +217,32 @@ class TestRebuild(Tester):
         node3.byteman_submit(script)
 
         # First rebuild must fail and data must be incomplete
-        with self.assertRaises(ToolError, msg='Unexpected: SUCCEED'):
-            debug('Executing first rebuild -> '),
+        with pytest.raises(ToolError, message='Unexpected: SUCCEED'):
+            logger.debug('Executing first rebuild -> '),
             node3.nodetool('rebuild dc1')
-        debug('Expected: FAILED')
+        logger.debug('Expected: FAILED')
 
         session.execute('USE ks')
-        with self.assertRaises(AssertionError, msg='Unexpected: COMPLETE'):
-            debug('Checking data is complete -> '),
-            for i in xrange(0, 20000):
+        with pytest.raises(AssertionError, message='Unexpected: COMPLETE'):
+            logger.debug('Checking data is complete -> '),
+            for i in range(0, 20000):
                 query_c1c2(session, i, ConsistencyLevel.LOCAL_ONE)
-        debug('Expected: INCOMPLETE')
+        logger.debug('Expected: INCOMPLETE')
 
-        debug('Executing second rebuild -> '),
+        logger.debug('Executing second rebuild -> '),
         node3.nodetool('rebuild dc1')
-        debug('Expected: SUCCEED')
+        logger.debug('Expected: SUCCEED')
 
         # Check all streaming sessions completed, streamed ranges are skipped and verify streamed data
         node3.watch_log_for('All sessions completed')
         node3.watch_log_for('Skipping streaming those ranges.')
-        debug('Checking data is complete -> '),
-        for i in xrange(0, 20000):
+        logger.debug('Checking data is complete -> '),
+        for i in range(0, 20000):
             query_c1c2(session, i, ConsistencyLevel.LOCAL_ONE)
-        debug('Expected: COMPLETE')
+        logger.debug('Expected: COMPLETE')
 
     @since('3.6')
-    def rebuild_ranges_test(self):
+    def test_rebuild_ranges(self):
         """
         @jira_ticket CASSANDRA-10406
         """
@@ -285,16 +299,16 @@ class TestRebuild(Tester):
 
         # check data is sent by stopping node1
         node1.stop()
-        for i in xrange(0, keys):
+        for i in range(0, keys):
             query_c1c2(session, i, ConsistencyLevel.ONE)
         # ks2 should not be streamed
         session.execute('USE ks2')
-        for i in xrange(0, keys):
+        for i in range(0, keys):
             query_c1c2(session, i, ConsistencyLevel.ONE, tolerate_missing=True, must_be_missing=True)
 
     @since('3.10')
-    @no_vnodes()
-    def disallow_rebuild_nonlocal_range_test(self):
+    @pytest.mark.no_vnodes
+    def test_disallow_rebuild_nonlocal_range(self):
         """
         @jira_ticket CASSANDRA-9875
         Verifies that nodetool rebuild throws an error when an operator
@@ -322,12 +336,12 @@ class TestRebuild(Tester):
         session = self.patient_exclusive_cql_connection(node1)
         session.execute("CREATE KEYSPACE ks1 WITH replication = {'class':'SimpleStrategy', 'replication_factor':2};")
 
-        with self.assertRaisesRegexp(ToolError, 'is not a range that is owned by this node'):
+        with pytest.raises(ToolError, match='is not a range that is owned by this node'):
             node1.nodetool('rebuild -ks ks1 -ts (%s,%s]' % (node1_token, node2_token))
 
     @since('3.10')
-    @no_vnodes()
-    def disallow_rebuild_from_nonreplica_test(self):
+    @pytest.mark.no_vnodes
+    def test_disallow_rebuild_from_nonreplica(self):
         """
         @jira_ticket CASSANDRA-9875
         Verifies that nodetool rebuild throws an error when an operator
@@ -358,12 +372,12 @@ class TestRebuild(Tester):
         session = self.patient_exclusive_cql_connection(node1)
         session.execute("CREATE KEYSPACE ks1 WITH replication = {'class':'SimpleStrategy', 'replication_factor':2};")
 
-        with self.assertRaisesRegexp(ToolError, 'Unable to find sufficient sources for streaming range'):
+        with pytest.raises(ToolError, match='Unable to find sufficient sources for streaming range'):
             node1.nodetool('rebuild -ks ks1 -ts (%s,%s] -s %s' % (node3_token, node1_token, node3_address))
 
     @since('3.10')
-    @no_vnodes()
-    def rebuild_with_specific_sources_test(self):
+    @pytest.mark.no_vnodes
+    def test_rebuild_with_specific_sources(self):
         """
         @jira_ticket CASSANDRA-9875
         Verifies that an operator can specify specific sources to use
@@ -426,18 +440,18 @@ class TestRebuild(Tester):
 
         # verify that node2 streamed to node3
         log_matches = node2.grep_log('Session with %s is complete' % node3.address_for_current_version())
-        self.assertTrue(len(log_matches) > 0)
+        assert len(log_matches) > 0
 
         # verify that node1 did not participate
         log_matches = node1.grep_log('streaming plan for Rebuild')
-        self.assertEqual(len(log_matches), 0)
+        assert len(log_matches) == 0
 
         # check data is sent by stopping node1, node2
         node1.stop()
         node2.stop()
-        for i in xrange(0, keys):
+        for i in range(0, keys):
             query_c1c2(session, i, ConsistencyLevel.ONE)
         # ks2 should not be streamed
         session.execute('USE ks2')
-        for i in xrange(0, keys):
+        for i in range(0, keys):
             query_c1c2(session, i, ConsistencyLevel.ONE, tolerate_missing=True, must_be_missing=True)

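A note on the exception assertions converted above: `assertRaises`/`assertRaisesRegexp` map onto `pytest.raises`, whose `match=` argument applies `re.search` to the string form of the raised exception. A minimal, self-contained sketch of the mapping, using RuntimeError and a hypothetical helper as stand-ins for ToolError and the nodetool calls:

    import pytest

    def run_failing_rebuild():
        # hypothetical stand-in for the node.nodetool('rebuild ...') calls above
        raise RuntimeError('Unable to find sufficient sources for streaming range')

    def test_raises_mapping():
        # unittest: with self.assertRaises(RuntimeError): ...
        with pytest.raises(RuntimeError):
            run_failing_rebuild()

        # unittest: with self.assertRaisesRegexp(RuntimeError, 'sufficient sources'): ...
        # match= is applied with re.search against str(exception)
        with pytest.raises(RuntimeError, match='sufficient sources'):
            run_failing_rebuild()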
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/repair_tests/deprecated_repair_test.py
----------------------------------------------------------------------
diff --git a/repair_tests/deprecated_repair_test.py b/repair_tests/deprecated_repair_test.py
index e438f53..4c60664 100644
--- a/repair_tests/deprecated_repair_test.py
+++ b/repair_tests/deprecated_repair_test.py
@@ -1,15 +1,20 @@
+import pytest
+import logging
+
 from distutils.version import LooseVersion
 
 from cassandra import ConsistencyLevel
 from ccmlib.common import is_win
 
-from dtest import Tester, debug, create_ks, create_cf
+from dtest import Tester, create_ks, create_cf
 from tools.assertions import assert_length_equal
 from tools.data import insert_c1c2
-from tools.decorators import since
 from tools.jmxutils import (JolokiaAgent, make_mbean,
                             remove_perf_disable_shared_mem)
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
 
 @since("2.2", max_version="4")
 class TestDeprecatedRepairAPI(Tester):
@@ -19,7 +24,7 @@ class TestDeprecatedRepairAPI(Tester):
     Test if deprecated repair JMX API runs with expected parameters
     """
 
-    def force_repair_async_1_test(self):
+    def test_force_repair_async_1(self):
         """
         test forceRepairAsync(String keyspace, boolean isSequential,
                               Collection<String> dataCenters,
@@ -28,15 +33,15 @@ class TestDeprecatedRepairAPI(Tester):
         """
         opt = self._deprecated_repair_jmx("forceRepairAsync(java.lang.String,boolean,java.util.Collection,java.util.Collection,boolean,boolean,[Ljava.lang.String;)",
                                           ['ks', True, [], [], False, False, ["cf"]])
-        self.assertEqual(opt["parallelism"], "parallel" if is_win() else "sequential", opt)
-        self.assertEqual(opt["primary_range"], "false", opt)
-        self.assertEqual(opt["incremental"], "true", opt)
-        self.assertEqual(opt["job_threads"], "1", opt)
-        self.assertEqual(opt["data_centers"], "[]", opt)
-        self.assertEqual(opt["hosts"], "[]", opt)
-        self.assertEqual(opt["column_families"], "[cf]", opt)
-
-    def force_repair_async_2_test(self):
+        assert opt["parallelism"], "parallel" if is_win() else "sequential" == opt
+        assert opt["primary_range"], "false" == opt
+        assert opt["incremental"], "true" == opt
+        assert opt["job_threads"], "1" == opt
+        assert opt["data_centers"], "[]" == opt
+        assert opt["hosts"], "[]" == opt
+        assert opt["column_families"], "[cf]" == opt
+
+    def test_force_repair_async_2(self):
         """
         test forceRepairAsync(String keyspace, int parallelismDegree,
                               Collection<String> dataCenters,
@@ -45,15 +50,15 @@ class TestDeprecatedRepairAPI(Tester):
         """
         opt = self._deprecated_repair_jmx("forceRepairAsync(java.lang.String,int,java.util.Collection,java.util.Collection,boolean,boolean,[Ljava.lang.String;)",
                                           ['ks', 1, [], [], True, True, []])
-        self.assertEqual(opt["parallelism"], "parallel", opt)
-        self.assertEqual(opt["primary_range"], "true", opt)
-        self.assertEqual(opt["incremental"], "false", opt)
-        self.assertEqual(opt["job_threads"], "1", opt)
-        self.assertEqual(opt["data_centers"], "[]", opt)
-        self.assertEqual(opt["hosts"], "[]", opt)
-        self.assertEqual(opt["column_families"], "[]", opt)
-
-    def force_repair_async_3_test(self):
+        assert opt["parallelism"], "parallel" == opt
+        assert opt["primary_range"], "true" == opt
+        assert opt["incremental"], "false" == opt
+        assert opt["job_threads"], "1" == opt
+        assert opt["data_centers"], "[]" == opt
+        assert opt["hosts"], "[]" == opt
+        assert opt["column_families"], "[]" == opt
+
+    def test_force_repair_async_3(self):
         """
         test forceRepairAsync(String keyspace, boolean isSequential,
                               boolean isLocal, boolean primaryRange,
@@ -61,15 +66,15 @@ class TestDeprecatedRepairAPI(Tester):
         """
         opt = self._deprecated_repair_jmx("forceRepairAsync(java.lang.String,boolean,boolean,boolean,boolean,[Ljava.lang.String;)",
                                           ['ks', False, False, False, False, ["cf"]])
-        self.assertEqual(opt["parallelism"], "parallel", opt)
-        self.assertEqual(opt["primary_range"], "false", opt)
-        self.assertEqual(opt["incremental"], "true", opt)
-        self.assertEqual(opt["job_threads"], "1", opt)
-        self.assertEqual(opt["data_centers"], "[]", opt)
-        self.assertEqual(opt["hosts"], "[]", opt)
-        self.assertEqual(opt["column_families"], "[cf]", opt)
-
-    def force_repair_range_async_1_test(self):
+        assert opt["parallelism"], "parallel" == opt
+        assert opt["primary_range"], "false" == opt
+        assert opt["incremental"], "true" == opt
+        assert opt["job_threads"], "1" == opt
+        assert opt["data_centers"], "[]" == opt
+        assert opt["hosts"], "[]" == opt
+        assert opt["column_families"], "[cf]" == opt
+
+    def test_force_repair_range_async_1(self):
         """
         test forceRepairRangeAsync(String beginToken, String endToken,
                                    String keyspaceName, boolean isSequential,
@@ -79,16 +84,16 @@ class TestDeprecatedRepairAPI(Tester):
         """
         opt = self._deprecated_repair_jmx("forceRepairRangeAsync(java.lang.String,java.lang.String,java.lang.String,boolean,java.util.Collection,java.util.Collection,boolean,[Ljava.lang.String;)",
                                           ["0", "1000", "ks", True, ["dc1"], [], False, ["cf"]])
-        self.assertEqual(opt["parallelism"], "parallel" if is_win() else "sequential", opt)
-        self.assertEqual(opt["primary_range"], "false", opt)
-        self.assertEqual(opt["incremental"], "true", opt)
-        self.assertEqual(opt["job_threads"], "1", opt)
-        self.assertEqual(opt["data_centers"], "[dc1]", opt)
-        self.assertEqual(opt["hosts"], "[]", opt)
-        self.assertEqual(opt["ranges"], "1", opt)
-        self.assertEqual(opt["column_families"], "[cf]", opt)
-
-    def force_repair_range_async_2_test(self):
+        assert opt["parallelism"], "parallel" if is_win() else "sequential" == opt
+        assert opt["primary_range"], "false" == opt
+        assert opt["incremental"], "true" == opt
+        assert opt["job_threads"], "1" == opt
+        assert opt["data_centers"], "[dc1]" == opt
+        assert opt["hosts"], "[]" == opt
+        assert opt["ranges"], "1" == opt
+        assert opt["column_families"], "[cf]" == opt
+
+    def test_force_repair_range_async_2(self):
         """
         test forceRepairRangeAsync(String beginToken, String endToken,
                                    String keyspaceName, int parallelismDegree,
@@ -98,16 +103,16 @@ class TestDeprecatedRepairAPI(Tester):
         """
         opt = self._deprecated_repair_jmx("forceRepairRangeAsync(java.lang.String,java.lang.String,java.lang.String,int,java.util.Collection,java.util.Collection,boolean,[Ljava.lang.String;)",
                                           ["0", "1000", "ks", 2, [], [], True, ["cf"]])
-        self.assertEqual(opt["parallelism"], "parallel" if is_win() else "dc_parallel", opt)
-        self.assertEqual(opt["primary_range"], "false", opt)
-        self.assertEqual(opt["incremental"], "false", opt)
-        self.assertEqual(opt["job_threads"], "1", opt)
-        self.assertEqual(opt["data_centers"], "[]", opt)
-        self.assertEqual(opt["hosts"], "[]", opt)
-        self.assertEqual(opt["ranges"], "1", opt)
-        self.assertEqual(opt["column_families"], "[cf]", opt)
-
-    def force_repair_range_async_3_test(self):
+        assert opt["parallelism"], "parallel" if is_win() else "dc_parallel" == opt
+        assert opt["primary_range"], "false" == opt
+        assert opt["incremental"], "false" == opt
+        assert opt["job_threads"], "1" == opt
+        assert opt["data_centers"], "[]" == opt
+        assert opt["hosts"], "[]" == opt
+        assert opt["ranges"], "1" == opt
+        assert opt["column_families"], "[cf]" == opt
+
+    def test_force_repair_range_async_3(self):
         """
         test forceRepairRangeAsync(String beginToken, String endToken,
                                    String keyspaceName, boolean isSequential,
@@ -116,14 +121,14 @@ class TestDeprecatedRepairAPI(Tester):
         """
         opt = self._deprecated_repair_jmx("forceRepairRangeAsync(java.lang.String,java.lang.String,java.lang.String,boolean,boolean,boolean,[Ljava.lang.String;)",
                                           ["0", "1000", "ks", True, True, True, ["cf"]])
-        self.assertEqual(opt["parallelism"], "parallel" if is_win() else "sequential", opt)
-        self.assertEqual(opt["primary_range"], "false", opt)
-        self.assertEqual(opt["incremental"], "false", opt)
-        self.assertEqual(opt["job_threads"], "1", opt)
-        self.assertEqual(opt["data_centers"], "[dc1]", opt)
-        self.assertEqual(opt["hosts"], "[]", opt)
-        self.assertEqual(opt["ranges"], "1", opt)
-        self.assertEqual(opt["column_families"], "[cf]", opt)
+        assert opt["parallelism"], "parallel" if is_win() else "sequential" == opt
+        assert opt["primary_range"], "false" == opt
+        assert opt["incremental"], "false" == opt
+        assert opt["job_threads"], "1" == opt
+        assert opt["data_centers"], "[dc1]" == opt
+        assert opt["hosts"], "[]" == opt
+        assert opt["ranges"], "1" == opt
+        assert opt["column_families"], "[cf]" == opt
 
     def _deprecated_repair_jmx(self, method, arguments):
         """
@@ -135,7 +140,7 @@ class TestDeprecatedRepairAPI(Tester):
         """
         cluster = self.cluster
 
-        debug("Starting cluster..")
+        logger.debug("Starting cluster..")
         cluster.populate([1, 1])
         node1, node2 = cluster.nodelist()
         remove_perf_disable_shared_mem(node1)
@@ -152,7 +157,7 @@ class TestDeprecatedRepairAPI(Tester):
         mbean = make_mbean('db', 'StorageService')
         with JolokiaAgent(node1) as jmx:
             # assert repair runs and returns valid cmd number
-            self.assertEqual(jmx.execute_method(mbean, method, arguments), 1)
+            assert jmx.execute_method(mbean, method, arguments) == 1
         # wait for log to start
         node1.watch_log_for("Starting repair command")
         # get repair parameters from the log
@@ -165,7 +170,7 @@ class TestDeprecatedRepairAPI(Tester):
         line, m = line[0]
 
         if supports_pull_repair:
-            self.assertEqual(m.group("pullrepair"), "false", "Pull repair cannot be enabled through the deprecated API so the pull repair option should always be false.")
+            assert m.group("pullrepair"), "false" == "Pull repair cannot be enabled through the deprecated API so the pull repair option should always be false."
 
         return {"parallelism": m.group("parallelism"),
                 "primary_range": m.group("pr"),

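One pitfall in conversions like the `assertEqual` rewrites above: `self.assertEqual(a, b, msg)` takes the failure message as a third argument, while a bare assert takes it after a comma, so the comparison operator must be written out explicitly or the statement silently degrades to a truthiness check. A small sketch of the wrong and right forms:

    opt = {"parallelism": "sequential"}

    # WRONG: only checks that opt["parallelism"] is truthy; everything after the
    # comma is just the assertion message, so no comparison ever happens
    assert opt["parallelism"], "sequential" == opt

    # RIGHT: comparison first, diagnostic message after the comma
    assert opt["parallelism"] == "sequential", opt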
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/repair_tests/incremental_repair_test.py
----------------------------------------------------------------------
diff --git a/repair_tests/incremental_repair_test.py b/repair_tests/incremental_repair_test.py
index e3017c2..4791e9a 100644
--- a/repair_tests/incremental_repair_test.py
+++ b/repair_tests/incremental_repair_test.py
@@ -1,8 +1,11 @@
 import time
+import pytest
+import re
+import logging
+
 from datetime import datetime
 from collections import Counter, namedtuple
 from re import findall, compile
-from unittest import skip
 from uuid import UUID, uuid1
 
 from cassandra import ConsistencyLevel
@@ -10,13 +13,14 @@ from cassandra.query import SimpleStatement
 from cassandra.metadata import Murmur3Token
 from ccmlib.common import is_win
 from ccmlib.node import Node, ToolError
-from nose.plugins.attrib import attr
 
-from dtest import Tester, debug, create_ks, create_cf
+from dtest import Tester, create_ks, create_cf
 from tools.assertions import assert_almost_equal, assert_one
 from tools.data import insert_c1c2
-from tools.decorators import since, no_vnodes
-from tools.misc import new_node
+from tools.misc import new_node, ImmutableMapping
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 class ConsistentState(object):
@@ -29,7 +33,12 @@ class ConsistentState(object):
 
 
 class TestIncRepair(Tester):
-    ignore_log_patterns = (r'Can\'t send migration request: node.*is down',)
+
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        fixture_dtest_setup.ignore_log_patterns = list(fixture_dtest_setup.ignore_log_patterns) + [
+            r'Can\'t send migration request: node.*is down',
+        ]
 
     @classmethod
     def _get_repaired_data(cls, node, keyspace):
@@ -41,7 +50,7 @@ class TestIncRepair(Tester):
         out = node.run_sstablemetadata(keyspace=keyspace).stdout
 
         def matches(pattern):
-            return filter(None, [pattern.match(l) for l in out.split('\n')])
+            return filter(None, [pattern.match(l) for l in out.decode("utf-8").split('\n')])
         names = [m.group(1) for m in matches(_sstable_name)]
         repaired_times = [int(m.group(1)) for m in matches(_repaired_at)]
 
@@ -57,37 +66,38 @@ class TestIncRepair(Tester):
     def assertNoRepairedSSTables(self, node, keyspace):
         """ Checks that no sstables are marked repaired, and none are marked pending repair """
         data = self._get_repaired_data(node, keyspace)
-        self.assertTrue(all([t.repaired == 0 for t in data]), '{}'.format(data))
-        self.assertTrue(all([t.pending_id is None for t in data]))
+        assert all([t.repaired == 0 for t in data]), '{}'.format(data)
+        assert all([t.pending_id is None for t in data])
 
     def assertAllPendingRepairSSTables(self, node, keyspace, pending_id=None):
         """ Checks that no sstables are marked repaired, and all are marked pending repair """
         data = self._get_repaired_data(node, keyspace)
-        self.assertTrue(all([t.repaired == 0 for t in data]), '{}'.format(data))
+        assert all([t.repaired == 0 for t in data]), '{}'.format(data)
         if pending_id:
-            self.assertTrue(all([t.pending_id == pending_id for t in data]))
+            assert all([t.pending_id == pending_id for t in data])
         else:
-            self.assertTrue(all([t.pending_id is not None for t in data]))
+            assert all([t.pending_id is not None for t in data])
 
     def assertAllRepairedSSTables(self, node, keyspace):
         """ Checks that all sstables are marked repaired, and none are marked pending repair """
         data = self._get_repaired_data(node, keyspace)
-        self.assertTrue(all([t.repaired > 0 for t in data]), '{}'.format(data))
-        self.assertTrue(all([t.pending_id is None for t in data]), '{}'.format(data))
+        assert all([t.repaired > 0 for t in data]), '{}'.format(data)
+        assert all([t.pending_id is None for t in data]), '{}'.format(data)
 
     def assertRepairedAndUnrepaired(self, node, keyspace):
         """ Checks that a node has both repaired and unrepaired sstables for a given keyspace """
         data = self._get_repaired_data(node, keyspace)
-        self.assertTrue(any([t.repaired > 0 for t in data]), '{}'.format(data))
-        self.assertTrue(any([t.repaired == 0 for t in data]), '{}'.format(data))
-        self.assertTrue(all([t.pending_id is None for t in data]), '{}'.format(data))
+        assert any([t.repaired > 0 for t in data]), '{}'.format(data)
+        assert any([t.repaired == 0 for t in data]), '{}'.format(data)
+        assert all([t.pending_id is None for t in data]), '{}'.format(data)
 
     @since('4.0')
-    def consistent_repair_test(self):
-        cluster = self.cluster
-        cluster.set_configuration_options(values={'hinted_handoff_enabled': False, 'num_tokens': 1, 'commitlog_sync_period_in_ms': 500})
-        cluster.populate(3).start()
-        node1, node2, node3 = cluster.nodelist()
+    def test_consistent_repair(self):
+        self.fixture_dtest_setup.setup_overrides.cluster_options = ImmutableMapping({'hinted_handoff_enabled': 'false',
+                                                                                     'num_tokens': 1,
+                                                                                     'commitlog_sync_period_in_ms': 500})
+        self.cluster.populate(3).start()
+        node1, node2, node3 = self.cluster.nodelist()
 
         # make data inconsistent between nodes
         session = self.patient_exclusive_cql_connection(node3)
@@ -115,15 +125,15 @@ class TestIncRepair(Tester):
         node1.start(wait_other_notice=True, wait_for_binary_proto=True)
 
         # flush and check that no sstables are marked repaired
-        for node in cluster.nodelist():
+        for node in self.cluster.nodelist():
             node.flush()
             self.assertNoRepairedSSTables(node, 'ks')
             session = self.patient_exclusive_cql_connection(node)
             results = list(session.execute("SELECT * FROM system.repairs"))
-            self.assertEqual(len(results), 0, str(results))
+            assert len(results) == 0, str(results)
 
         # disable compaction so we can verify sstables are marked pending repair
-        for node in cluster.nodelist():
+        for node in self.cluster.nodelist():
             node.nodetool('disableautocompaction ks tbl')
 
         node1.repair(options=['ks'])
@@ -131,28 +141,28 @@ class TestIncRepair(Tester):
         # check that all participating nodes have the repair recorded in their system
         # table, that all nodes are listed as participants, and that all sstables are
         # (still) marked pending repair
-        expected_participants = {n.address() for n in cluster.nodelist()}
-        expected_participants_wp = {n.address_and_port() for n in cluster.nodelist()}
+        expected_participants = {n.address() for n in self.cluster.nodelist()}
+        expected_participants_wp = {n.address_and_port() for n in self.cluster.nodelist()}
         recorded_pending_ids = set()
-        for node in cluster.nodelist():
+        for node in self.cluster.nodelist():
             session = self.patient_exclusive_cql_connection(node)
             results = list(session.execute("SELECT * FROM system.repairs"))
-            self.assertEqual(len(results), 1)
+            assert len(results) == 1
             result = results[0]
-            self.assertEqual(set(result.participants), expected_participants)
+            assert set(result.participants) == expected_participants
             if hasattr(result, "participants_wp"):
-                self.assertEqual(set(result.participants_wp), expected_participants_wp)
-            self.assertEqual(result.state, ConsistentState.FINALIZED, "4=FINALIZED")
+                assert set(result.participants_wp) == expected_participants_wp
+            assert result.state == ConsistentState.FINALIZED, "4=FINALIZED"
             pending_id = result.parent_id
             self.assertAllPendingRepairSSTables(node, 'ks', pending_id)
             recorded_pending_ids.add(pending_id)
 
-        self.assertEqual(len(recorded_pending_ids), 1)
+        assert len(recorded_pending_ids) == 1
 
         # sstables are compacted out of pending repair by a compaction
         # task, we disabled compaction earlier in the test, so here we
         # force the compaction and check that all sstables are promoted
-        for node in cluster.nodelist():
+        for node in self.cluster.nodelist():
             node.nodetool('compact ks tbl')
             self.assertAllRepairedSSTables(node, 'ks')
 
@@ -166,7 +176,7 @@ class TestIncRepair(Tester):
         ranges = {'\x00\x00\x00\x08K\xc2\xed\\<\xd3{X\x00\x00\x00\x08r\x04\x89[j\x81\xc4\xe6',
                   '\x00\x00\x00\x08r\x04\x89[j\x81\xc4\xe6\x00\x00\x00\x08\xd8\xcdo\x9e\xcbl\x83\xd4',
                   '\x00\x00\x00\x08\xd8\xcdo\x9e\xcbl\x83\xd4\x00\x00\x00\x08K\xc2\xed\\<\xd3{X'}
-        ranges = {buffer(b) for b in ranges}
+        ranges = {bytes(b, "Latin-1") for b in ranges}
 
         for node in self.cluster.nodelist():
             session = self.patient_exclusive_cql_connection(node)
@@ -177,8 +187,13 @@ class TestIncRepair(Tester):
                              {str(n.address()) + ":7000" for n in self.cluster.nodelist()},
                              ranges, now, now, ConsistentState.REPAIRING])  # 2=REPAIRING
 
+        # Because we faked the repairs and inserted directly into the system.repairs table,
+        # the current implementation in trunk (LocalSessions) only picks sessions up via
+        # callbacks or from the system.repairs table once at startup. Stopping and starting
+        # the nodes forces the repair sessions to be loaded into the correct in-memory objects.
         time.sleep(1)
         for node in self.cluster.nodelist():
+            node.flush()
             node.stop(gently=False)
 
         for node in self.cluster.nodelist():
@@ -187,12 +202,13 @@ class TestIncRepair(Tester):
         return session_id
 
     @since('4.0')
-    def manual_session_fail_test(self):
+    def test_manual_session_fail(self):
         """ check manual failing of repair sessions via nodetool works properly """
-        cluster = self.cluster
-        cluster.set_configuration_options(values={'hinted_handoff_enabled': False, 'num_tokens': 1, 'commitlog_sync_period_in_ms': 500})
-        cluster.populate(3).start()
-        node1, node2, node3 = cluster.nodelist()
+        self.fixture_dtest_setup.setup_overrides.cluster_options = ImmutableMapping({'hinted_handoff_enabled': 'false',
+                                                                                     'num_tokens': 1,
+                                                                                     'commitlog_sync_period_in_ms': 500})
+        self.cluster.populate(3).start()
+        node1, node2, node3 = self.cluster.nodelist()
 
         # make data inconsistent between nodes
         session = self.patient_exclusive_cql_connection(node3)
@@ -201,35 +217,37 @@ class TestIncRepair(Tester):
 
         for node in self.cluster.nodelist():
             out = node.nodetool('repair_admin')
-            self.assertIn("no sessions", out.stdout)
+            assert "no sessions" in out.stdout
 
         session_id = self._make_fake_session('ks', 'tbl')
 
         for node in self.cluster.nodelist():
             out = node.nodetool('repair_admin')
             lines = out.stdout.split('\n')
-            self.assertGreater(len(lines), 1)
+            assert len(lines) > 1
             line = lines[1]
-            self.assertIn(str(session_id), line)
-            self.assertIn("REPAIRING", line)
+            assert str(session_id) in line
+            assert "REPAIRING" in line
 
         node1.nodetool("repair_admin --cancel {}".format(session_id))
 
         for node in self.cluster.nodelist():
             out = node.nodetool('repair_admin --all')
             lines = out.stdout.split('\n')
-            self.assertGreater(len(lines), 1)
+            assert len(lines) > 1
             line = lines[1]
-            self.assertIn(str(session_id), line)
-            self.assertIn("FAILED", line)
+            assert str(session_id) in line
+            assert "FAILED" in line
 
     @since('4.0')
-    def manual_session_cancel_non_coordinator_failure_test(self):
+    def test_manual_session_cancel_non_coordinator_failure(self):
         """ check manual failing of repair sessions via a node other than the coordinator fails """
-        cluster = self.cluster
-        cluster.set_configuration_options(values={'hinted_handoff_enabled': False, 'num_tokens': 1, 'commitlog_sync_period_in_ms': 500})
-        cluster.populate(3).start()
-        node1, node2, node3 = cluster.nodelist()
+        self.fixture_dtest_setup.setup_overrides.cluster_options = ImmutableMapping({'hinted_handoff_enabled': 'false',
+                                                                                     'num_tokens': 1,
+                                                                                     'commitlog_sync_period_in_ms': 500})
+
+        self.cluster.populate(3).start()
+        node1, node2, node3 = self.cluster.nodelist()
 
         # make data inconsistent between nodes
         session = self.patient_exclusive_cql_connection(node3)
@@ -238,17 +256,17 @@ class TestIncRepair(Tester):
 
         for node in self.cluster.nodelist():
             out = node.nodetool('repair_admin')
-            self.assertIn("no sessions", out.stdout)
+            assert "no sessions" in out.stdout
 
         session_id = self._make_fake_session('ks', 'tbl')
 
         for node in self.cluster.nodelist():
             out = node.nodetool('repair_admin')
             lines = out.stdout.split('\n')
-            self.assertGreater(len(lines), 1)
+            assert len(lines) > 1
             line = lines[1]
-            self.assertIn(str(session_id), line)
-            self.assertIn("REPAIRING", line)
+            assert str(session_id) in line
+            assert "REPAIRING" in line
 
         try:
             node2.nodetool("repair_admin --cancel {}".format(session_id))
@@ -260,18 +278,19 @@ class TestIncRepair(Tester):
         for node in self.cluster.nodelist():
             out = node.nodetool('repair_admin')
             lines = out.stdout.split('\n')
-            self.assertGreater(len(lines), 1)
+            assert len(lines) > 1
             line = lines[1]
-            self.assertIn(str(session_id), line)
-            self.assertIn("REPAIRING", line)
+            assert str(session_id) in line
+            assert "REPAIRING" in line
 
     @since('4.0')
-    def manual_session_force_cancel_test(self):
+    def test_manual_session_force_cancel(self):
         """ check manual failing of repair sessions via a non-coordinator works if the --force flag is set """
-        cluster = self.cluster
-        cluster.set_configuration_options(values={'hinted_handoff_enabled': False, 'num_tokens': 1, 'commitlog_sync_period_in_ms': 500})
-        cluster.populate(3).start()
-        node1, node2, node3 = cluster.nodelist()
+        self.fixture_dtest_setup.setup_overrides.cluster_options = ImmutableMapping({'hinted_handoff_enabled': 'false',
+                                                                                     'num_tokens': 1,
+                                                                                     'commitlog_sync_period_in_ms': 500})
+        self.cluster.populate(3).start()
+        node1, node2, node3 = self.cluster.nodelist()
 
         # make data inconsistent between nodes
         session = self.patient_exclusive_cql_connection(node3)
@@ -280,29 +299,29 @@ class TestIncRepair(Tester):
 
         for node in self.cluster.nodelist():
             out = node.nodetool('repair_admin')
-            self.assertIn("no sessions", out.stdout)
+            assert "no sessions" in out.stdout
 
         session_id = self._make_fake_session('ks', 'tbl')
 
         for node in self.cluster.nodelist():
             out = node.nodetool('repair_admin')
             lines = out.stdout.split('\n')
-            self.assertGreater(len(lines), 1)
+            assert len(lines) > 1
             line = lines[1]
-            self.assertIn(str(session_id), line)
-            self.assertIn("REPAIRING", line)
+            assert str(session_id) in line
+            assert "REPAIRING" in line
 
         node2.nodetool("repair_admin --cancel {} --force".format(session_id))
 
         for node in self.cluster.nodelist():
             out = node.nodetool('repair_admin --all')
             lines = out.stdout.split('\n')
-            self.assertGreater(len(lines), 1)
+            assert len(lines) > 1
             line = lines[1]
-            self.assertIn(str(session_id), line)
-            self.assertIn("FAILED", line)
+            assert str(session_id) in line
+            assert "FAILED" in line
 
-    def sstable_marking_test(self):
+    def test_sstable_marking(self):
         """
         * Launch a three node cluster
         * Stop node3
@@ -311,11 +330,10 @@ class TestIncRepair(Tester):
         * Issue an incremental repair, and wait for it to finish
         * Run sstablemetadata on every node, assert that all sstables are marked as repaired
         """
-        cluster = self.cluster
         # hinted handoff can create SSTable that we don't need after node3 restarted
-        cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
-        cluster.populate(3).start()
-        node1, node2, node3 = cluster.nodelist()
+        self.fixture_dtest_setup.setup_overrides.cluster_options = ImmutableMapping({'hinted_handoff_enabled': 'false'})
+        self.cluster.populate(3).start()
+        node1, node2, node3 = self.cluster.nodelist()
 
         node3.stop(gently=True)
 
@@ -331,20 +349,20 @@ class TestIncRepair(Tester):
         node3.watch_log_for("Initializing keyspace1.standard1", filename=log_file)
         # wait for things to settle before starting repair
         time.sleep(1)
-        if cluster.version() >= "2.2":
+        if self.cluster.version() >= "2.2":
             node3.repair()
         else:
             node3.nodetool("repair -par -inc")
 
-        if cluster.version() >= '4.0':
+        if self.cluster.version() >= '4.0':
             # sstables are compacted out of pending repair by a compaction
-            for node in cluster.nodelist():
+            for node in self.cluster.nodelist():
                 node.nodetool('compact keyspace1 standard1')
 
-        for out in (node.run_sstablemetadata(keyspace='keyspace1').stdout for node in cluster.nodelist()):
-            self.assertNotIn('Repaired at: 0', out)
+        for out in (node.run_sstablemetadata(keyspace='keyspace1').stdout for node in self.cluster.nodelist()):
+            assert 'Repaired at: 0' not in out.decode("utf-8")
 
-    def multiple_repair_test(self):
+    def test_multiple_repair(self):
         """
         * Launch a three node cluster
         * Create a keyspace with RF 3 and a table
@@ -370,21 +388,21 @@ class TestIncRepair(Tester):
         create_ks(session, 'ks', 3)
         create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})
 
-        debug("insert data")
+        logger.debug("insert data")
 
-        insert_c1c2(session, keys=range(1, 50), consistency=ConsistencyLevel.ALL)
+        insert_c1c2(session, keys=list(range(1, 50)), consistency=ConsistencyLevel.ALL)
         node1.flush()
 
-        debug("bringing down node 3")
+        logger.debug("bringing down node 3")
         node3.flush()
         node3.stop(gently=False)
 
-        debug("inserting additional data into node 1 and 2")
-        insert_c1c2(session, keys=range(50, 100), consistency=ConsistencyLevel.TWO)
+        logger.debug("inserting additional data into node 1 and 2")
+        insert_c1c2(session, keys=list(range(50, 100)), consistency=ConsistencyLevel.TWO)
         node1.flush()
         node2.flush()
 
-        debug("restarting and repairing node 3")
+        logger.debug("restarting and repairing node 3")
         node3.start(wait_for_binary_proto=True)
 
         if cluster.version() >= "2.2":
@@ -397,15 +415,15 @@ class TestIncRepair(Tester):
         if is_win:
             time.sleep(2)
 
-        debug("stopping node 2")
+        logger.debug("stopping node 2")
         node2.stop(gently=False)
 
-        debug("inserting data in nodes 1 and 3")
-        insert_c1c2(session, keys=range(100, 150), consistency=ConsistencyLevel.TWO)
+        logger.debug("inserting data in nodes 1 and 3")
+        insert_c1c2(session, keys=list(range(100, 150)), consistency=ConsistencyLevel.TWO)
         node1.flush()
         node3.flush()
 
-        debug("start and repair node 2")
+        logger.debug("start and repair node 2")
         node2.start(wait_for_binary_proto=True)
 
         if cluster.version() >= "2.2":
@@ -413,7 +431,7 @@ class TestIncRepair(Tester):
         else:
             node2.nodetool("repair -par -inc")
 
-        debug("replace node and check data integrity")
+        logger.debug("replace node and check data integrity")
         node3.stop(gently=False)
         node5 = Node('node5', cluster, True, ('127.0.0.5', 9160), ('127.0.0.5', 7000), '7500', '0', None, ('127.0.0.5', 9042))
         cluster.add(node5, False)
@@ -421,7 +439,7 @@ class TestIncRepair(Tester):
 
         assert_one(session, "SELECT COUNT(*) FROM ks.cf LIMIT 200", [149])
 
-    def sstable_repairedset_test(self):
+    def test_sstable_repairedset(self):
         """
         * Launch a two node cluster
         * Insert data with stress
@@ -437,10 +455,9 @@ class TestIncRepair(Tester):
         * Run sstablemetadata on both nodes again, pipe to a new file
         * Verify repairs occurred and repairedAt was updated
         """
-        cluster = self.cluster
-        cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
-        cluster.populate(2).start()
-        node1, node2 = cluster.nodelist()
+        self.fixture_dtest_setup.setup_overrides.cluster_options = ImmutableMapping({'hinted_handoff_enabled': 'false'})
+        self.cluster.populate(2).start()
+        node1, node2 = self.cluster.nodelist()
         node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)', 'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)', '-rate', 'threads=50'])
 
         node1.flush()
@@ -451,53 +468,57 @@ class TestIncRepair(Tester):
         node2.run_sstablerepairedset(keyspace='keyspace1')
         node2.start(wait_for_binary_proto=True)
 
-        initialOut1 = node1.run_sstablemetadata(keyspace='keyspace1').stdout
-        initialOut2 = node2.run_sstablemetadata(keyspace='keyspace1').stdout
+        initialOut1 = node1.run_sstablemetadata(keyspace='keyspace1').stdout.decode("utf-8")
+        initialOut2 = node2.run_sstablemetadata(keyspace='keyspace1').stdout.decode("utf-8")
 
         matches = findall('(?<=Repaired at:).*', '\n'.join([initialOut1, initialOut2]))
-        debug("Repair timestamps are: {}".format(matches))
+        logger.debug("Repair timestamps are: {}".format(matches))
 
         uniquematches = set(matches)
         matchcount = Counter(matches)
 
-        self.assertGreaterEqual(len(uniquematches), 2, uniquematches)
+        assert len(uniquematches) >= 2, uniquematches
 
-        self.assertGreaterEqual(max(matchcount), 1, matchcount)
+        assert max(matchcount.values()) >= 1, matchcount
 
-        self.assertIn('Repaired at: 0', '\n'.join([initialOut1, initialOut2]))
+        assert re.search('Repaired at: 0', '\n'.join([initialOut1, initialOut2]))
 
         node1.stop()
         node2.stress(['write', 'n=15K', 'no-warmup', '-schema', 'replication(factor=2)'])
         node2.flush()
         node1.start(wait_for_binary_proto=True)
 
-        if cluster.version() >= "2.2":
+        if self.cluster.version() >= "2.2":
             node1.repair()
         else:
             node1.nodetool("repair -par -inc")
 
-        if cluster.version() >= '4.0':
+        if self.cluster.version() >= '4.0':
             # sstables are compacted out of pending repair by a compaction
-            for node in cluster.nodelist():
+            for node in self.cluster.nodelist():
                 node.nodetool('compact keyspace1 standard1')
 
         finalOut1 = node1.run_sstablemetadata(keyspace='keyspace1').stdout
+        if not isinstance(finalOut1, str):
+            finalOut1 = finalOut1.decode("utf-8")
         finalOut2 = node2.run_sstablemetadata(keyspace='keyspace1').stdout
+        if not isinstance(finalOut2, str):
+            finalOut2 = finalOut2.decode("utf-8")
 
         matches = findall('(?<=Repaired at:).*', '\n'.join([finalOut1, finalOut2]))
 
-        debug(matches)
+        logger.debug(matches)
 
         uniquematches = set(matches)
         matchcount = Counter(matches)
 
-        self.assertGreaterEqual(len(uniquematches), 2)
+        assert len(uniquematches) >= 2
 
-        self.assertGreaterEqual(max(matchcount), 2)
+        assert max(matchcount.values()) >= 2
 
-        self.assertNotIn('Repaired at: 0', '\n'.join([finalOut1, finalOut2]))
+        assert not re.search('Repaired at: 0', '\n'.join([finalOut1, finalOut2]))
 
-    def compaction_test(self):
+    def test_compaction(self):
         """
         Test we can major compact after an incremental repair
         * Launch a three node cluster
@@ -543,22 +564,22 @@ class TestIncRepair(Tester):
             assert_one(session, "select val from tab where key =" + str(x), [1])
 
     @since("2.2")
-    def multiple_full_repairs_lcs_test(self):
+    def test_multiple_full_repairs_lcs(self):
         """
         @jira_ticket CASSANDRA-11172 - repeated full repairs should not cause infinite loop in getNextBackgroundTask
         """
         cluster = self.cluster
         cluster.populate(2).start(wait_for_binary_proto=True)
         node1, node2 = cluster.nodelist()
-        for x in xrange(0, 10):
+        for x in range(0, 10):
             node1.stress(['write', 'n=100k', 'no-warmup', '-rate', 'threads=10', '-schema', 'compaction(strategy=LeveledCompactionStrategy,sstable_size_in_mb=10)', 'replication(factor=2)'])
             cluster.flush()
             cluster.wait_for_compactions()
             node1.nodetool("repair -full keyspace1 standard1")
 
-    @attr('long')
-    @skip('hangs CI')
-    def multiple_subsequent_repair_test(self):
+    @pytest.mark.env("long")
+    @pytest.mark.skip(reason='hangs CI')
+    def test_multiple_subsequent_repair(self):
         """
         @jira_ticket CASSANDRA-8366
 
@@ -576,55 +597,55 @@ class TestIncRepair(Tester):
         cluster.populate(3).start()
         node1, node2, node3 = cluster.nodelist()
 
-        debug("Inserting data with stress")
+        logger.debug("Inserting data with stress")
         node1.stress(['write', 'n=5M', 'no-warmup', '-rate', 'threads=10', '-schema', 'replication(factor=3)'])
 
-        debug("Flushing nodes")
+        logger.debug("Flushing nodes")
         cluster.flush()
 
-        debug("Waiting compactions to finish")
+        logger.debug("Waiting compactions to finish")
         cluster.wait_for_compactions()
 
         if self.cluster.version() >= '2.2':
-            debug("Repairing node1")
+            logger.debug("Repairing node1")
             node1.nodetool("repair")
-            debug("Repairing node2")
+            logger.debug("Repairing node2")
             node2.nodetool("repair")
-            debug("Repairing node3")
+            logger.debug("Repairing node3")
             node3.nodetool("repair")
         else:
-            debug("Repairing node1")
+            logger.debug("Repairing node1")
             node1.nodetool("repair -par -inc")
-            debug("Repairing node2")
+            logger.debug("Repairing node2")
             node2.nodetool("repair -par -inc")
-            debug("Repairing node3")
+            logger.debug("Repairing node3")
             node3.nodetool("repair -par -inc")
 
-        # Using "print" instead of debug() here is on purpose.  The compactions
+        # Using "print" instead of logger.debug() here is on purpose.  The compactions
         # take a long time and don't print anything by default, which can result
         # in the test being timed out after 20 minutes.  These print statements
         # prevent it from being timed out.
-        print "compacting node1"
+        print("compacting node1")
         node1.compact()
-        print "compacting node2"
+        print("compacting node2")
         node2.compact()
-        print "compacting node3"
+        print("compacting node3")
         node3.compact()
 
         # wait some time to be sure the load size is propagated between nodes
-        debug("Waiting for load size info to be propagated between nodes")
+        logger.debug("Waiting for load size info to be propagated between nodes")
         time.sleep(45)
 
-        load_size_in_kb = float(sum(map(lambda n: n.data_size(), [node1, node2, node3])))
+        load_size_in_kb = float(sum([n.data_size() for n in [node1, node2, node3]]))
         load_size = load_size_in_kb / 1024 / 1024
-        debug("Total Load size: {}GB".format(load_size))
+        logger.debug("Total Load size: {}GB".format(load_size))
 
         # There is still some overhead, but it's lot better. We tolerate 25%.
         expected_load_size = 4.5  # In GB
         assert_almost_equal(load_size, expected_load_size, error=0.25)
 
-    @attr('resource-intensive')
-    def sstable_marking_test_not_intersecting_all_ranges(self):
+    @pytest.mark.resource_intensive
+    def test_sstable_marking_not_intersecting_all_ranges(self):
         """
         @jira_ticket CASSANDRA-10299
         * Launch a four node cluster
@@ -636,21 +657,21 @@ class TestIncRepair(Tester):
         cluster.populate(4).start(wait_for_binary_proto=True)
         node1, node2, node3, node4 = cluster.nodelist()
 
-        debug("Inserting data with stress")
+        logger.debug("Inserting data with stress")
         node1.stress(['write', 'n=3', 'no-warmup', '-rate', 'threads=1', '-schema', 'replication(factor=3)'])
 
-        debug("Flushing nodes")
+        logger.debug("Flushing nodes")
         cluster.flush()
 
         repair_options = '' if self.cluster.version() >= '2.2' else '-inc -par'
 
-        debug("Repairing node 1")
+        logger.debug("Repairing node 1")
         node1.nodetool("repair {}".format(repair_options))
-        debug("Repairing node 2")
+        logger.debug("Repairing node 2")
         node2.nodetool("repair {}".format(repair_options))
-        debug("Repairing node 3")
+        logger.debug("Repairing node 3")
         node3.nodetool("repair {}".format(repair_options))
-        debug("Repairing node 4")
+        logger.debug("Repairing node 4")
         node4.nodetool("repair {}".format(repair_options))
 
         if cluster.version() >= '4.0':
@@ -659,16 +680,16 @@ class TestIncRepair(Tester):
                 node.nodetool('compact keyspace1 standard1')
 
         for out in (node.run_sstablemetadata(keyspace='keyspace1').stdout for node in cluster.nodelist() if len(node.get_sstables('keyspace1', 'standard1')) > 0):
-            self.assertNotIn('Repaired at: 0', out)
+            assert 'Repaired at: 0' not in out.decode("utf-8")
 
-    @no_vnodes()
+    @pytest.mark.no_vnodes
     @since('4.0')
-    def move_test(self):
+    def test_move(self):
         """ Test repaired data remains in sync after a move """
-        cluster = self.cluster
-        cluster.set_configuration_options(values={'hinted_handoff_enabled': False, 'commitlog_sync_period_in_ms': 500})
-        cluster.populate(4, tokens=[0, 2**32, 2**48, -(2**32)]).start()
-        node1, node2, node3, node4 = cluster.nodelist()
+        self.fixture_dtest_setup.setup_overrides.cluster_options = ImmutableMapping({'hinted_handoff_enabled': 'false',
+                                                                                     'commitlog_sync_period_in_ms': 500})
+        self.cluster.populate(4, tokens=[0, 2**32, 2**48, -(2**32)]).start()
+        node1, node2, node3, node4 = self.cluster.nodelist()
 
         session = self.patient_exclusive_cql_connection(node3)
         session.execute("CREATE KEYSPACE ks WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor': 2}")
@@ -686,25 +707,25 @@ class TestIncRepair(Tester):
             session.execute(stmt, (v, v))
 
         # everything should be in sync
-        for node in cluster.nodelist():
+        for node in self.cluster.nodelist():
             result = node.repair(options=['ks', '--validate'])
-            self.assertIn("Repaired data is in sync", result.stdout)
+            assert "Repaired data is in sync" in result.stdout
 
         node2.nodetool('move {}'.format(2**16))
 
         # everything should still be in sync
-        for node in cluster.nodelist():
+        for node in self.cluster.nodelist():
             result = node.repair(options=['ks', '--validate'])
-            self.assertIn("Repaired data is in sync", result.stdout)
+            assert "Repaired data is in sync" in result.stdout
 
-    @no_vnodes()
+    @pytest.mark.no_vnodes
     @since('4.0')
-    def decommission_test(self):
+    def test_decommission(self):
         """ Test repaired data remains in sync after a decommission """
-        cluster = self.cluster
-        cluster.set_configuration_options(values={'hinted_handoff_enabled': False, 'commitlog_sync_period_in_ms': 500})
-        cluster.populate(4).start()
-        node1, node2, node3, node4 = cluster.nodelist()
+        self.fixture_dtest_setup.setup_overrides.cluster_options = ImmutableMapping({'hinted_handoff_enabled': 'false',
+                                                                                     'commitlog_sync_period_in_ms': 500})
+        self.cluster.populate(4).start()
+        node1, node2, node3, node4 = self.cluster.nodelist()
 
         session = self.patient_exclusive_cql_connection(node3)
         session.execute("CREATE KEYSPACE ks WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor': 2}")
@@ -722,25 +743,25 @@ class TestIncRepair(Tester):
             session.execute(stmt, (v, v))
 
         # everything should be in sync
-        for node in cluster.nodelist():
+        for node in self.cluster.nodelist():
             result = node.repair(options=['ks', '--validate'])
-            self.assertIn("Repaired data is in sync", result.stdout)
+            assert "Repaired data is in sync" in result.stdout
 
         node2.nodetool('decommission')
 
         # everything should still be in sync
         for node in [node1, node3, node4]:
             result = node.repair(options=['ks', '--validate'])
-            self.assertIn("Repaired data is in sync", result.stdout)
+            assert "Repaired data is in sync" in result.stdout
 
-    @no_vnodes()
+    @pytest.mark.no_vnodes
     @since('4.0')
-    def bootstrap_test(self):
+    def test_bootstrap(self):
         """ Test repaired data remains in sync after a bootstrap """
-        cluster = self.cluster
-        cluster.set_configuration_options(values={'hinted_handoff_enabled': False, 'commitlog_sync_period_in_ms': 500})
-        cluster.populate(3).start()
-        node1, node2, node3 = cluster.nodelist()
+        self.fixture_dtest_setup.setup_overrides.cluster_options = ImmutableMapping({'hinted_handoff_enabled': 'false',
+                                                                                     'commitlog_sync_period_in_ms': 500})
+        self.cluster.populate(3).start()
+        node1, node2, node3 = self.cluster.nodelist()
 
         session = self.patient_exclusive_cql_connection(node3)
         session.execute("CREATE KEYSPACE ks WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor': 2}")
@@ -760,27 +781,28 @@ class TestIncRepair(Tester):
         # everything should be in sync
         for node in [node1, node2, node3]:
             result = node.repair(options=['ks', '--validate'])
-            self.assertIn("Repaired data is in sync", result.stdout)
+            assert "Repaired data is in sync" in result.stdout
 
         node4 = new_node(self.cluster)
         node4.start(wait_for_binary_proto=True)
 
-        self.assertEqual(len(self.cluster.nodelist()), 4)
+        assert len(self.cluster.nodelist()) == 4
         # everything should still be in sync
         for node in self.cluster.nodelist():
             result = node.repair(options=['ks', '--validate'])
-            self.assertIn("Repaired data is in sync", result.stdout)
+            assert "Repaired data is in sync" in result.stdout
 
     @since('4.0')
-    def force_test(self):
+    def test_force(self):
         """
         forcing an incremental repair should incrementally repair any nodes
         that are up, but should not promote the sstables to repaired
         """
-        cluster = self.cluster
-        cluster.set_configuration_options(values={'hinted_handoff_enabled': False, 'num_tokens': 1, 'commitlog_sync_period_in_ms': 500})
-        cluster.populate(3).start()
-        node1, node2, node3 = cluster.nodelist()
+        self.fixture_dtest_setup.setup_overrides.cluster_options = ImmutableMapping({'hinted_handoff_enabled': 'false',
+                                                                                     'num_tokens': 1,
+                                                                                     'commitlog_sync_period_in_ms': 500})
+        self.cluster.populate(3).start()
+        node1, node2, node3 = self.cluster.nodelist()
 
         session = self.patient_exclusive_cql_connection(node3)
         session.execute("CREATE KEYSPACE ks WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor': 3}")
@@ -793,7 +815,7 @@ class TestIncRepair(Tester):
         node2.stop()
 
         # repair should fail because node2 is down
-        with self.assertRaises(ToolError):
+        with pytest.raises(ToolError):
             node1.repair(options=['ks'])
 
         # run with force flag
@@ -804,15 +826,16 @@ class TestIncRepair(Tester):
         self.assertNoRepairedSSTables(node2, 'ks')
 
     @since('4.0')
-    def hosts_test(self):
+    def test_hosts(self):
         """
         running an incremental repair with hosts specified should incrementally repair
         the given nodes, but should not promote the sstables to repaired
         """
-        cluster = self.cluster
-        cluster.set_configuration_options(values={'hinted_handoff_enabled': False, 'num_tokens': 1, 'commitlog_sync_period_in_ms': 500})
-        cluster.populate(3).start()
-        node1, node2, node3 = cluster.nodelist()
+        self.fixture_dtest_setup.setup_overrides.cluster_options = ImmutableMapping({'hinted_handoff_enabled': 'false',
+                                                                                     'num_tokens': 1,
+                                                                                     'commitlog_sync_period_in_ms': 500})
+        self.cluster.populate(3).start()
+        node1, node2, node3 = self.cluster.nodelist()
 
         session = self.patient_exclusive_cql_connection(node3)
         session.execute("CREATE KEYSPACE ks WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor': 3}")
@@ -830,18 +853,17 @@ class TestIncRepair(Tester):
         self.assertNoRepairedSSTables(node2, 'ks')
 
     @since('4.0')
-    def subrange_test(self):
+    def test_subrange(self):
         """
         running an incremental repair with hosts specified should incrementally repair
         the given nodes, but should not promote the sstables to repaired
         """
-        cluster = self.cluster
-        cluster.set_configuration_options(values={'hinted_handoff_enabled': False,
-                                                  'num_tokens': 1,
-                                                  'commitlog_sync_period_in_ms': 500,
-                                                  'partitioner': 'org.apache.cassandra.dht.Murmur3Partitioner'})
-        cluster.populate(3).start()
-        node1, node2, node3 = cluster.nodelist()
+        self.fixture_dtest_setup.setup_overrides.cluster_options = ImmutableMapping({'hinted_handoff_enabled': 'false',
+                                                                                     'num_tokens': 1,
+                                                                                     'commitlog_sync_period_in_ms': 500,
+                                                                                     'partitioner': 'org.apache.cassandra.dht.Murmur3Partitioner'})
+        self.cluster.populate(3).start()
+        node1, node2, node3 = self.cluster.nodelist()
 
         session = self.patient_exclusive_cql_connection(node3)
         session.execute("CREATE KEYSPACE ks WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor': 3}")
@@ -851,12 +873,12 @@ class TestIncRepair(Tester):
         for i in range(10):
             session.execute(stmt, (i, i))
 
-        for node in cluster.nodelist():
+        for node in self.cluster.nodelist():
             node.flush()
             self.assertNoRepairedSSTables(node, 'ks')
 
         # only repair the partition k=0
-        token = Murmur3Token.from_key(str(bytearray([0, 0, 0, 0])))
+        token = Murmur3Token.from_key(bytes([0, 0, 0, 0]))
         # import ipdb; ipdb.set_trace()
         # run with force flag
         node1.repair(options=['ks', '-st', str(token.value - 1), '-et', str(token.value)])

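The autouse-fixture pattern above replaces the old `ignore_log_patterns` class attribute; the one syntactic trap is that a single-element tuple needs its trailing comma, otherwise the parentheses collapse to a plain string. A minimal sketch, assuming a `fixture_dtest_setup` object like the one these tests receive from conftest:

    import pytest

    class TestExample(object):

        @pytest.fixture(autouse=True)
        def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
            # the trailing comma keeps this a 1-tuple; without it the assignment
            # is a plain string, and downstream code iterating the patterns
            # would walk it character by character
            fixture_dtest_setup.ignore_log_patterns = (
                r'Can\'t send migration request: node.*is down',
            )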
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/repair_tests/preview_repair_test.py
----------------------------------------------------------------------
diff --git a/repair_tests/preview_repair_test.py b/repair_tests/preview_repair_test.py
index 86627ab..ee5a38d 100644
--- a/repair_tests/preview_repair_test.py
+++ b/repair_tests/preview_repair_test.py
@@ -1,23 +1,25 @@
+import pytest
 import time
 
 from cassandra import ConsistencyLevel
 from cassandra.query import SimpleStatement
 
 from dtest import Tester
-from tools.decorators import no_vnodes, since
+
+since = pytest.mark.since
 
 
 @since('4.0')
-class PreviewRepairTest(Tester):
+class TestPreviewRepair(Tester):
 
     def assert_no_repair_history(self, session):
         rows = session.execute("select * from system_distributed.repair_history")
-        self.assertEqual(rows.current_rows, [])
+        assert rows.current_rows == []
         rows = session.execute("select * from system_distributed.parent_repair_history")
-        self.assertEqual(rows.current_rows, [])
+        assert rows.current_rows == []
 
-    @no_vnodes()
-    def preview_test(self):
+    @pytest.mark.no_vnodes
+    def test_preview(self):
         """ Test that preview correctly detects out of sync data """
         cluster = self.cluster
         cluster.set_configuration_options(values={'hinted_handoff_enabled': False, 'commitlog_sync_period_in_ms': 500})
@@ -30,7 +32,7 @@ class PreviewRepairTest(Tester):
 
         # everything should be in sync
         result = node1.repair(options=['ks', '--preview'])
-        self.assertIn("Previewed data was in sync", result.stdout)
+        assert "Previewed data was in sync" in result.stdout
         self.assert_no_repair_history(session)
 
         # make data inconsistent between nodes
@@ -57,16 +59,16 @@ class PreviewRepairTest(Tester):
 
         # data should not be in sync for full and unrepaired previews
         result = node1.repair(options=['ks', '--preview'])
-        self.assertIn("Total estimated streaming", result.stdout)
-        self.assertNotIn("Previewed data was in sync", result.stdout)
+        assert "Total estimated streaming" in result.stdout
+        assert "Previewed data was in sync" not in result.stdout
 
         result = node1.repair(options=['ks', '--preview', '--full'])
-        self.assertIn("Total estimated streaming", result.stdout)
-        self.assertNotIn("Previewed data was in sync", result.stdout)
+        assert "Total estimated streaming" in result.stdout
+        assert "Previewed data was in sync" not in result.stdout
 
         # repaired data should be in sync anyway
         result = node1.repair(options=['ks', '--validate'])
-        self.assertIn("Repaired data is in sync", result.stdout)
+        assert "Repaired data is in sync" in result.stdout
 
         self.assert_no_repair_history(session)
 
@@ -77,10 +79,10 @@ class PreviewRepairTest(Tester):
 
         # ...and everything should be in sync
         result = node1.repair(options=['ks', '--preview'])
-        self.assertIn("Previewed data was in sync", result.stdout)
+        assert "Previewed data was in sync" in result.stdout
 
         result = node1.repair(options=['ks', '--preview', '--full'])
-        self.assertIn("Previewed data was in sync", result.stdout)
+        assert "Previewed data was in sync" in result.stdout
 
         result = node1.repair(options=['ks', '--validate'])
-        self.assertIn("Repaired data is in sync", result.stdout)
+        assert "Repaired data is in sync" in result.stdout


http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/meta_tests/assertion_test.py
----------------------------------------------------------------------
diff --git a/meta_tests/assertion_test.py b/meta_tests/assertion_test.py
index a056e47..2c3fe09 100644
--- a/meta_tests/assertion_test.py
+++ b/meta_tests/assertion_test.py
@@ -34,14 +34,14 @@ class TestAssertStderrClean(TestCase):
 
     def test_invalid_error(self):
         err = "This string is no good and should fail."
-        with self.assertRaises(AssertionError):
+        with pytest.raises(AssertionError):
             assert_stderr_clean(err)
 
     def test_valid_and_invalid_errors_same_line(self):
         err = ("This string is no good and should fail.objc[36358]: Class JavaLaunchHelper is implemented in both /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk"
                "/Contents/Home/bin/java and /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk/Contents/Home/jre/lib/libinstrument.dylib."
                "One of the two will be used. Which one is undefined.")
-        with self.assertRaises(AssertionError):
+        with pytest.raises(AssertionError):
             assert_stderr_clean(err)
 
     def test_invalid_error_after_valid_error(self):
@@ -49,7 +49,7 @@ class TestAssertStderrClean(TestCase):
                /Contents/Home/bin/java and /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk/Contents/Home/jre/lib/libinstrument.dylib.
                One of the two will be used. Which one is undefined.
                This string is no good and should fail."""
-        with self.assertRaises(AssertionError):
+        with pytest.raises(AssertionError):
             assert_stderr_clean(err)
 
     def test_invalid_error_before_valid_errors(self):
@@ -57,13 +57,13 @@ class TestAssertStderrClean(TestCase):
                  Class JavaLaunchHelper is implemented in both /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk/Contents/Home/bin/java
                  and /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk/Contents/Home/jre/lib/libinstrument.dylib. One of the two will be used. Which one is undefined.
                  """
-        with self.assertRaises(AssertionError):
+        with pytest.raises(AssertionError):
             assert_stderr_clean(err)
 
 
 class TestAssertionMethods(TestCase):
 
-    def assertions_test(self):
+    def test_assertions(self):
         # assert_exception_test
         mock_session = Mock(**{'execute.side_effect': AlreadyExists("Dummy exception message.")})
         assert_exception(mock_session, "DUMMY QUERY", expected=AlreadyExists)
@@ -111,5 +111,5 @@ class TestAssertionMethods(TestCase):
         assert_almost_equal(1, 1.1, 1.3, error=.31)
 
     def test_almost_equal_expect_failure(self):
-        with self.assertRaises(AssertionError):
+        with pytest.raises(AssertionError):
             assert_almost_equal(1, 1.3, error=.1)
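
The pattern applied throughout this file is mechanical: unittest's
assertRaises context manager becomes pytest.raises. A self-contained
illustration of the replacement idiom:

    import pytest

    def divide(a, b):
        return a / b

    def test_divide_by_zero():
        # pytest.raises fails the test if the block does not raise, and
        # exposes the raised exception through the ExceptionInfo object.
        with pytest.raises(ZeroDivisionError) as excinfo:
            divide(1, 0)
        assert 'division' in str(excinfo.value)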

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/meta_tests/utils_test/funcutils_test.py
----------------------------------------------------------------------
diff --git a/meta_tests/utils_test/funcutils_test.py b/meta_tests/utils_test/funcutils_test.py
index 3bbddd5..4d99c36 100644
--- a/meta_tests/utils_test/funcutils_test.py
+++ b/meta_tests/utils_test/funcutils_test.py
@@ -18,7 +18,7 @@ class Testget_rate_limited_function(TestCase):
         """
         self.assertIs(rate_limited_func_arg.func, self.mock_func)
         self.assertIs(rate_limited_func_arg.limit, self.mock_limit)
-        self.assertEqual(rate_limited_func_arg.last_called, False)
+        assert rate_limited_func_arg.last_called is False
 
     def test_init_with_positional_args(self):
         """
@@ -106,16 +106,16 @@ class Testget_rate_limited_function(TestCase):
         If you call a rate-limited function, last_called is set to a new value.
         """
         self.rate_limited_func.limit = 1
-        self.assertEqual(self.rate_limited_func.last_called, False)
+        assert self.rate_limited_func.last_called is False
         self.rate_limited_func()
-        self.assertAlmostEqual(self.rate_limited_func.last_called, time(), places=2)
+        assert round(self.rate_limited_func.last_called - time(), 2) == 0
 
     def test_last_called_not_set_when_called_within_time_limit(self):
         """
         If you call a rate-limited function during the time limit, last_called is not set to a new value.
         """
         self.rate_limited_func.limit = 1
-        self.assertEqual(self.rate_limited_func.last_called, False)
+        assert self.rate_limited_func.last_called is False
         self.rate_limited_func()
         last_called = self.rate_limited_func.last_called
         self.rate_limited_func()
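
The rounding comparison above hand-rolls assertAlmostEqual; pytest.approx,
available since pytest 3.0, states the same tolerance directly. A sketch,
with rate_limited_func standing in for the object under test:

    import pytest
    from time import time

    def test_last_called_is_roughly_now(rate_limited_func):
        # rate_limited_func: assumed fixture providing the wrapped callable
        rate_limited_func()
        # abs=0.01 roughly mirrors assertAlmostEqual(..., places=2)
        assert rate_limited_func.last_called == pytest.approx(time(), abs=0.01)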

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/meta_tests/utils_test/metadata_wrapper_test.py
----------------------------------------------------------------------
diff --git a/meta_tests/utils_test/metadata_wrapper_test.py b/meta_tests/utils_test/metadata_wrapper_test.py
index 9009eac..4a8afc5 100644
--- a/meta_tests/utils_test/metadata_wrapper_test.py
+++ b/meta_tests/utils_test/metadata_wrapper_test.py
@@ -10,7 +10,7 @@ from tools.metadata_wrapper import (UpdatingClusterMetadataWrapper,
 
 
 class UpdatingMetadataWrapperBaseTest(TestCase):
-    def all_subclasses_known_test(self):
+    def test_all_subclasses_known(self):
         """
         Test that all the subclasses of UpdatingMetadataWrapperBase are known
         to this test suite. Basically, this will slap us on the wrist in the
@@ -40,7 +40,7 @@ class UpdatingMetadataWrapperBaseTest(TestCase):
             init_args = [MagicMock() for _ in range(init_arg_len - 1)]
             yield klaus(*init_args)
 
-    def all_subclasses_defer_getattr_test(self):
+    def test_all_subclasses_defer_getattr(self):
         """
         Each subclass should defer its attribute accesses to the wrapped
         object.
@@ -48,7 +48,7 @@ class UpdatingMetadataWrapperBaseTest(TestCase):
         for wrapper in self._each_subclass_instantiated_with_mock_args():
             self.assertIs(wrapper.foo, wrapper._wrapped.foo)
 
-    def all_subclasses_defer_getitem_test(self):
+    def test_all_subclasses_defer_getitem(self):
         """
         Each subclass should defer its item accesses to the wrapped object.
         """
@@ -57,8 +57,8 @@ class UpdatingMetadataWrapperBaseTest(TestCase):
             # from _wrapped[Y] for all Y
             wrapper._wrapped.__getitem__.side_effect = hash
             # check mocking correctness
-            self.assertNotEqual(wrapper['foo'], wrapper._wrapped['bar'])
-            self.assertEqual(wrapper['bar'], wrapper._wrapped['bar'])
+            assert wrapper['foo'] != wrapper._wrapped['bar']
+            assert wrapper['bar'] == wrapper._wrapped['bar']
 
 
 class UpdatingTableMetadataWrapperTest(TestCase):
@@ -74,7 +74,7 @@ class UpdatingTableMetadataWrapperTest(TestCase):
             max_schema_agreement_wait=self.max_schema_agreement_wait_sentinel
         )
 
-    def wrapped_access_calls_refresh_test(self):
+    def test_wrapped_access_calls_refresh(self):
         """
         Accessing the wrapped object should call the table-refreshing method on
         the cluster.
@@ -87,7 +87,7 @@ class UpdatingTableMetadataWrapperTest(TestCase):
             max_schema_agreement_wait=self.max_schema_agreement_wait_sentinel
         )
 
-    def default_wrapper_max_schema_agreement_wait_is_None_test(self):
+    def test_default_wrapper_max_schema_agreement_wait_is_None(self):
         wrapper = UpdatingTableMetadataWrapper(
             cluster=self.cluster_mock,
             ks_name=self.ks_name_sentinel,
@@ -100,7 +100,7 @@ class UpdatingTableMetadataWrapperTest(TestCase):
             max_schema_agreement_wait=None
         )
 
-    def wrapped_returns_table_metadata_test(self):
+    def test_wrapped_returns_table_metadata(self):
         """
         The wrapped object is accessed correctly from the internal cluster object.
         """
@@ -115,17 +115,12 @@ class UpdatingTableMetadataWrapperTest(TestCase):
         keyspaces_defaultdict[self.ks_name_sentinel].tables.__getitem__.side_effect = hash
 
         # check mocking correctness
-        self.assertNotEqual(
-            self.wrapper._wrapped,
-            self.cluster_mock.metadata.keyspaces[self.ks_name_sentinel].tables['foo']
-        )
+        assert self.wrapper._wrapped != self.cluster_mock.metadata.keyspaces[self.ks_name_sentinel].tables['foo']
+
         # and this is the behavior we care about
-        self.assertEqual(
-            self.wrapper._wrapped,
-            self.cluster_mock.metadata.keyspaces[self.ks_name_sentinel].tables[self.table_name_sentinel]
-        )
+        assert self.wrapper._wrapped == self.cluster_mock.metadata.keyspaces[self.ks_name_sentinel].tables[self.table_name_sentinel]
 
-    def repr_test(self):
+    def test_repr(self):
         self.assertEqual(
             repr(self.wrapper),
             'UpdatingTableMetadataWrapper(cluster={}, ks_name={}, table_name={}, max_schema_agreement_wait={})'.format(
@@ -145,7 +140,7 @@ class UpdatingKeyspaceMetadataWrapperTest(TestCase):
             max_schema_agreement_wait=self.max_schema_agreement_wait_sentinel
         )
 
-    def wrapped_access_calls_refresh_test(self):
+    def test_wrapped_access_calls_refresh(self):
         """
         Accessing the wrapped object should call the keyspace-refreshing method
         on the cluster.
@@ -157,7 +152,7 @@ class UpdatingKeyspaceMetadataWrapperTest(TestCase):
             max_schema_agreement_wait=self.max_schema_agreement_wait_sentinel
         )
 
-    def default_wrapper_max_schema_agreement_wait_is_None_test(self):
+    def test_default_wrapper_max_schema_agreement_wait_is_None(self):
         wrapper = UpdatingKeyspaceMetadataWrapper(
             cluster=self.cluster_mock,
             ks_name=self.ks_name_sentinel
@@ -168,7 +163,7 @@ class UpdatingKeyspaceMetadataWrapperTest(TestCase):
             max_schema_agreement_wait=None
         )
 
-    def wrapped_returns_keyspace_metadata_test(self):
+    def test_wrapped_returns_keyspace_metadata(self):
         """
         The wrapped object is accessed correctly from the internal cluster object.
         """
@@ -176,10 +171,10 @@ class UpdatingKeyspaceMetadataWrapperTest(TestCase):
         # from keyspaces[Y] for all Y
         self.cluster_mock.metadata.keyspaces.__getitem__.side_effect = hash
         # check mocking correctness
-        self.assertNotEqual(self.wrapper._wrapped, self.cluster_mock.metadata.keyspaces['foo'])
-        self.assertEqual(self.wrapper._wrapped, self.cluster_mock.metadata.keyspaces[self.ks_name_sentinel])
+        assert self.wrapper._wrapped != self.cluster_mock.metadata.keyspaces['foo']
+        assert self.wrapper._wrapped == self.cluster_mock.metadata.keyspaces[self.ks_name_sentinel]
 
-    def repr_test(self):
+    def test_repr(self):
         self.assertEqual(
             repr(self.wrapper),
             'UpdatingKeyspaceMetadataWrapper(cluster={}, ks_name={}, max_schema_agreement_wait={})'.format(
@@ -198,7 +193,7 @@ class UpdatingClusterMetadataWrapperTest(TestCase):
             max_schema_agreement_wait=self.max_schema_agreement_wait_sentinel
         )
 
-    def wrapped_access_calls_refresh_test(self):
+    def test_wrapped_access_calls_refresh(self):
         """
         Accessing the wrapped object should call the schema-refreshing method
         on the cluster.
@@ -209,20 +204,20 @@ class UpdatingClusterMetadataWrapperTest(TestCase):
             max_schema_agreement_wait=self.max_schema_agreement_wait_sentinel
         )
 
-    def default_wrapper_max_schema_agreement_wait_is_None_test(self):
+    def test_default_wrapper_max_schema_agreement_wait_is_None(self):
         wrapper = UpdatingClusterMetadataWrapper(cluster=self.cluster_mock)
         wrapper._wrapped
         self.cluster_mock.refresh_schema_metadata.assert_called_once_with(
             max_schema_agreement_wait=None
         )
 
-    def wrapped_returns_cluster_metadata_test(self):
+    def test_wrapped_returns_cluster_metadata(self):
         """
         The wrapped object is accessed correctly from the internal cluster object.
         """
         self.assertIs(self.wrapper._wrapped, self.cluster_mock.metadata)
 
-    def repr_test(self):
+    def test_repr(self):
         self.assertEqual(
             repr(self.wrapper),
             'UpdatingClusterMetadataWrapper(cluster={}, max_schema_agreement_wait={})'.format(
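
These wrapper tests lean on one MagicMock trick: wiring __getitem__.side_effect
to the built-in hash makes item access deterministic per key, which is what
lets the 'foo'/'bar' checks above prove the wrapper really delegates. In
isolation:

    from unittest.mock import MagicMock

    wrapped = MagicMock()
    wrapped.__getitem__.side_effect = hash  # wrapped[k] now returns hash(k)

    assert wrapped['bar'] == hash('bar')
    assert wrapped['foo'] != wrapped['bar']  # distinct keys, distinct results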

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/metadata_test.py
----------------------------------------------------------------------
diff --git a/metadata_test.py b/metadata_test.py
new file mode 100644
index 0000000..90141e7
--- /dev/null
+++ b/metadata_test.py
@@ -0,0 +1,68 @@
+import threading
+import time
+import logging
+import pytest
+
+from dtest import Tester
+
+logger = logging.getLogger(__name__)
+
+
+class TestMetadata(Tester):
+
+    def force_compact(self):
+        cluster = self.cluster
+        (node1, node2) = cluster.nodelist()
+        node1.nodetool("compact keyspace1 standard1")
+
+    def force_repair(self):
+        cluster = self.cluster
+        (node1, node2) = cluster.nodelist()
+        node1.nodetool('repair keyspace1 standard1')
+
+    def do_read(self):
+        cluster = self.cluster
+        (node1, node2) = cluster.nodelist()
+
+        node1.stress(['read', 'no-warmup', 'n=30000', '-schema', 'replication(factor=2)', 'compression=LZ4Compressor',
+                      '-rate', 'threads=1'])
+
+    @pytest.mark.skip(reason='hangs CI')
+    def test_metadata_reset_while_compact(self):
+        """
+        Resets the schema while a compaction, read, and repair are in progress.
+        All kinds of glorious things can fail.
+        """
+        # while the schema is being reset, there will inevitably be some
+        # queries that will error with this message
+        self.fixture_dtest_setup.ignore_log_patterns = ['.*Unknown keyspace/cf pair.*']
+
+        cluster = self.cluster
+        cluster.populate(2).start(wait_other_notice=True)
+        (node1, node2) = cluster.nodelist()
+
+        node1.nodetool("disableautocompaction")
+        node1.nodetool("setcompactionthroughput 1")
+
+        for i in range(3):
+            node1.stress(['write', 'no-warmup', 'n=30000', '-schema', 'replication(factor=2)',
+                          'compression=LZ4Compressor', '-rate', 'threads=5', '-pop', 'seq=1..30000'])
+            node1.flush()
+
+        thread = threading.Thread(target=self.force_compact)
+        thread.start()
+        time.sleep(1)
+
+        thread2 = threading.Thread(target=self.force_repair)
+        thread2.start()
+        time.sleep(5)
+
+        thread3 = threading.Thread(target=self.do_read)
+        thread3.start()
+        time.sleep(5)
+
+        node1.nodetool("resetlocalschema")
+
+        thread.join()
+        thread2.join()
+        thread3.join()
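
One subtle improvement in the rewritten file: ignore_log_patterns is now
assigned a list, where the deleted module set a bare string. Any consumer
that iterates the attribute sees the difference immediately:

    old_value = '.*Unknown keyspace/cf pair.*'    # old: a scalar string
    new_value = ['.*Unknown keyspace/cf pair.*']  # new: a one-element list

    # Iterating the string yields characters, not patterns:
    print(list(old_value)[:3])  # ['.', '*', 'U']
    print(list(new_value))      # ['.*Unknown keyspace/cf pair.*']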

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/metadata_tests.py
----------------------------------------------------------------------
diff --git a/metadata_tests.py b/metadata_tests.py
deleted file mode 100644
index 9c5ee2e..0000000
--- a/metadata_tests.py
+++ /dev/null
@@ -1,65 +0,0 @@
-import threading
-import time
-from unittest import skip
-
-from dtest import Tester
-
-
-class TestMetadata(Tester):
-
-    def force_compact(self):
-        cluster = self.cluster
-        (node1, node2) = cluster.nodelist()
-        node1.nodetool("compact keyspace1 standard1")
-
-    def force_repair(self):
-        cluster = self.cluster
-        (node1, node2) = cluster.nodelist()
-        node1.nodetool('repair keyspace1 standard1')
-
-    def do_read(self):
-        cluster = self.cluster
-        (node1, node2) = cluster.nodelist()
-
-        node1.stress(['read', 'no-warmup', 'n=30000', '-schema', 'replication(factor=2)', 'compression=LZ4Compressor',
-                      '-rate', 'threads=1'])
-
-    @skip('hangs CI')
-    def metadata_reset_while_compact_test(self):
-        """
-        Resets the schema while a compact, read and repair happens.
-        All kinds of glorious things can fail.
-        """
-
-        # while the schema is being reset, there will inevitably be some
-        # queries that will error with this message
-        self.ignore_log_patterns = '.*Unknown keyspace/cf pair.*'
-
-        cluster = self.cluster
-        cluster.populate(2).start(wait_other_notice=True)
-        (node1, node2) = cluster.nodelist()
-
-        node1.nodetool("disableautocompaction")
-        node1.nodetool("setcompactionthroughput 1")
-
-        for i in range(3):
-            node1.stress(['write', 'no-warmup', 'n=30000', '-schema', 'replication(factor=2)', 'compression=LZ4Compressor', '-rate', 'threads=5', '-pop', 'seq=1..30000'])
-            node1.flush()
-
-        thread = threading.Thread(target=self.force_compact)
-        thread.start()
-        time.sleep(1)
-
-        thread2 = threading.Thread(target=self.force_repair)
-        thread2.start()
-        time.sleep(5)
-
-        thread3 = threading.Thread(target=self.do_read)
-        thread3.start()
-        time.sleep(5)
-
-        node1.nodetool("resetlocalschema")
-
-        thread.join()
-        thread2.join()
-        thread3.join()

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/mixed_version_test.py
----------------------------------------------------------------------
diff --git a/mixed_version_test.py b/mixed_version_test.py
index 9da28b9..6fc656a 100644
--- a/mixed_version_test.py
+++ b/mixed_version_test.py
@@ -1,8 +1,13 @@
+import pytest
+import logging
+
 from cassandra import ConsistencyLevel, OperationTimedOut, ReadTimeout
 from cassandra.query import SimpleStatement
 
-from dtest import Tester, debug
-from tools.decorators import since
+from dtest import Tester
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 class TestSchemaChanges(Tester):
@@ -20,19 +25,20 @@ class TestSchemaChanges(Tester):
 
         node1, node2 = cluster.nodelist()
         original_version = node1.get_cassandra_version()
+        upgraded_version = None
         if original_version.vstring.startswith('2.0'):
             upgraded_version = 'github:apache/cassandra-2.1'
         elif original_version.vstring.startswith('2.1'):
             upgraded_version = 'github:apache/cassandra-2.2'
         else:
-            self.skip("This test is only designed to work with 2.0 and 2.1 right now")
+            pytest.skip(msg="This test is only designed to work with 2.0 and 2.1 right now")
 
         # start out with a major behind the previous version
 
         # upgrade node1
         node1.stop()
         node1.set_install_dir(version=upgraded_version)
-        debug("Set new cassandra dir for %s: %s" % (node1.name, node1.get_install_dir()))
+        logger.debug("Set new cassandra dir for %s: %s" % (node1.name, node1.get_install_dir()))
 
         node1.set_log_level("INFO")
         node1.start()
@@ -40,8 +46,9 @@ class TestSchemaChanges(Tester):
         session = self.patient_exclusive_cql_connection(node1)
         session.cluster.max_schema_agreement_wait = -1  # don't wait for schema agreement
 
-        debug("Creating keyspace and table")
-        session.execute("CREATE KEYSPACE test_upgrades WITH replication={'class': 'SimpleStrategy', 'replication_factor': '2'}")
+        logger.debug("Creating keyspace and table")
+        session.execute("CREATE KEYSPACE test_upgrades WITH replication={'class': 'SimpleStrategy', "
+                        "'replication_factor': '2'}")
         session.execute("CREATE TABLE test_upgrades.foo (a int primary key, b int)")
 
         pattern = r".*Got .* command for nonexistent table test_upgrades.foo.*"
@@ -50,15 +57,16 @@ class TestSchemaChanges(Tester):
             session.execute(SimpleStatement("SELECT * FROM test_upgrades.foo", consistency_level=ConsistencyLevel.ALL))
             self.fail("expected failure")
         except (ReadTimeout, OperationTimedOut):
-            debug("Checking node2 for warning in log")
+            logger.debug("Checking node2 for warning in log")
             node2.watch_log_for(pattern, timeout=10)
 
         # non-paged range slice
         try:
-            session.execute(SimpleStatement("SELECT * FROM test_upgrades.foo", consistency_level=ConsistencyLevel.ALL, fetch_size=None))
+            session.execute(SimpleStatement("SELECT * FROM test_upgrades.foo", consistency_level=ConsistencyLevel.ALL,
+                                            fetch_size=None))
             self.fail("expected failure")
         except (ReadTimeout, OperationTimedOut):
-            debug("Checking node2 for warning in log")
+            logger.debug("Checking node2 for warning in log")
             pattern = r".*Got .* command for nonexistent table test_upgrades.foo.*"
             node2.watch_log_for(pattern, timeout=10)
 
@@ -69,6 +77,6 @@ class TestSchemaChanges(Tester):
                                                 consistency_level=ConsistencyLevel.ALL, fetch_size=None))
             self.fail("expected failure")
         except (ReadTimeout, OperationTimedOut):
-            debug("Checking node2 for warning in log")
+            logger.debug("Checking node2 for warning in log")
             pattern = r".*Got .* command for nonexistent table test_upgrades.foo.*"
             node2.watch_log_for(pattern, timeout=10)
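
The debug() helper imported from dtest is gone; each module now owns a
stdlib logger, as the top of this hunk shows. The standalone equivalent
(level configuration is assumed to live in the shared pytest setup):

    import logging

    logger = logging.getLogger(__name__)
    logging.basicConfig(level=logging.DEBUG)  # only needed outside the harness

    logger.debug("Set new cassandra dir for %s: %s", "node1", "/path/to/install")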

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/multidc_putget_test.py
----------------------------------------------------------------------
diff --git a/multidc_putget_test.py b/multidc_putget_test.py
index bad3fad..3af0c9a 100644
--- a/multidc_putget_test.py
+++ b/multidc_putget_test.py
@@ -1,10 +1,14 @@
+import logging
+
 from dtest import Tester, create_ks, create_cf
 from tools.data import putget
 
+logger = logging.getLogger(__name__)
+
 
 class TestMultiDCPutGet(Tester):
 
-    def putget_2dc_rf1_test(self):
+    def test_putget_2dc_rf1(self):
         """ Simple put-get test for 2 DC with one node each (RF=1) [catches #3539] """
         cluster = self.cluster
         cluster.populate([1, 1]).start()
@@ -15,7 +19,7 @@ class TestMultiDCPutGet(Tester):
 
         putget(cluster, session)
 
-    def putget_2dc_rf2_test(self):
+    def test_putget_2dc_rf2(self):
         """ Simple put-get test for 2 DC with 2 node each (RF=2) -- tests cross-DC efficient writes """
         cluster = self.cluster
         cluster.populate([2, 2]).start()

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/native_transport_ssl_test.py
----------------------------------------------------------------------
diff --git a/native_transport_ssl_test.py b/native_transport_ssl_test.py
index 4716c80..284617d 100644
--- a/native_transport_ssl_test.py
+++ b/native_transport_ssl_test.py
@@ -1,20 +1,24 @@
 import os
+import pytest
+import logging
 
 from cassandra import ConsistencyLevel
 from cassandra.cluster import NoHostAvailable
 
 from dtest import Tester, create_ks, create_cf
 from tools.data import putget
-from tools.decorators import since
 from tools.misc import generate_ssl_stores
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
-class NativeTransportSSL(Tester):
+
+class TestNativeTransportSSL(Tester):
     """
     Native transport integration tests, specifically for ssl and port configurations.
     """
 
-    def connect_to_ssl_test(self):
+    def test_connect_to_ssl(self):
         """
         Connecting to SSL enabled native transport port should only be possible using SSL enabled client
         """
@@ -30,13 +34,13 @@ class NativeTransportSSL(Tester):
         except NoHostAvailable:
             pass
 
-        self.assertGreater(len(node1.grep_log("io.netty.handler.ssl.NotSslRecordException.*")), 0, "Missing SSL handshake exception while connecting with non-SSL enabled client")
+        assert len(node1.grep_log("io.netty.handler.ssl.NotSslRecordException.*")), 0 > "Missing SSL handshake exception while connecting with non-SSL enabled client"
 
         # enabled ssl on the client and try again (this should work)
-        session = self.patient_cql_connection(node1, ssl_opts={'ca_certs': os.path.join(self.test_path, 'ccm_node.cer')})
+        session = self.patient_cql_connection(node1, ssl_opts={'ca_certs': os.path.join(self.fixture_dtest_setup.test_path, 'ccm_node.cer')})
         self._putget(cluster, session)
 
-    def connect_to_ssl_optional_test(self):
+    def test_connect_to_ssl_optional(self):
         """
         Connecting to SSL optional native transport port must be possible with SSL and non-SSL native clients
         @jira_ticket CASSANDRA-10559
@@ -50,14 +54,13 @@ class NativeTransportSSL(Tester):
         self._putget(cluster, session)
 
         # enabled ssl on the client and try again (this should work)
-        session = self.patient_cql_connection(node1, ssl_opts={'ca_certs': os.path.join(self.test_path, 'ccm_node.cer')})
+        session = self.patient_cql_connection(node1, ssl_opts={'ca_certs': os.path.join(self.fixture_dtest_setup.test_path, 'ccm_node.cer')})
         self._putget(cluster, session, ks='ks2')
 
-    def use_custom_port_test(self):
+    def test_use_custom_port(self):
         """
         Connect to non-default native transport port
         """
-
         cluster = self._populateCluster(nativePort=9567)
         node1 = cluster.nodelist()[0]
 
@@ -72,12 +75,11 @@ class NativeTransportSSL(Tester):
         self._putget(cluster, session)
 
     @since('3.0')
-    def use_custom_ssl_port_test(self):
+    def test_use_custom_ssl_port(self):
         """
         Connect to additional ssl enabled native transport port
         @jira_ticket CASSANDRA-9590
         """
-
         cluster = self._populateCluster(enableSSL=True, nativePortSSL=9666)
         node1 = cluster.nodelist()[0]
         cluster.start()
@@ -87,14 +89,14 @@ class NativeTransportSSL(Tester):
         self._putget(cluster, session)
 
         # connect to additional dedicated ssl port
-        session = self.patient_cql_connection(node1, port=9666, ssl_opts={'ca_certs': os.path.join(self.test_path, 'ccm_node.cer')})
+        session = self.patient_cql_connection(node1, port=9666, ssl_opts={'ca_certs': os.path.join(self.fixture_dtest_setup.test_path, 'ccm_node.cer')})
         self._putget(cluster, session, ks='ks2')
 
     def _populateCluster(self, enableSSL=False, nativePort=None, nativePortSSL=None, sslOptional=False):
         cluster = self.cluster
 
         if enableSSL:
-            generate_ssl_stores(self.test_path)
+            generate_ssl_stores(self.fixture_dtest_setup.test_path)
             # C* versions before 3.0 (CASSANDRA-10559) do not know about
             # 'client_encryption_options.optional' - so we must not add that parameter
             if sslOptional:
@@ -102,7 +104,7 @@ class NativeTransportSSL(Tester):
                     'client_encryption_options': {
                         'enabled': True,
                         'optional': sslOptional,
-                        'keystore': os.path.join(self.test_path, 'keystore.jks'),
+                        'keystore': os.path.join(self.fixture_dtest_setup.test_path, 'keystore.jks'),
                         'keystore_password': 'cassandra'
                     }
                 })
@@ -110,7 +112,7 @@ class NativeTransportSSL(Tester):
                 cluster.set_configuration_options({
                     'client_encryption_options': {
                         'enabled': True,
-                        'keystore': os.path.join(self.test_path, 'keystore.jks'),
+                        'keystore': os.path.join(self.fixture_dtest_setup.test_path, 'keystore.jks'),
                         'keystore_password': 'cassandra'
                     }
                 })
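
For context on the ssl_opts dict these hunks keep rebuilding: with the
python driver versions in use at the time, the mapping was passed through
to ssl.wrap_socket, so ca_certs points at the trust material generated into
the test directory. A hedged sketch, with the path standing in for
fixture_dtest_setup.test_path:

    import os
    import ssl

    test_path = '/tmp/dtest'
    ssl_opts = {
        'ca_certs': os.path.join(test_path, 'ccm_node.cer'),
        'ssl_version': ssl.PROTOCOL_TLSv1,
        'cert_reqs': ssl.CERT_REQUIRED,  # verify the node cert against ca_certs
    }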

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/nodetool_test.py
----------------------------------------------------------------------
diff --git a/nodetool_test.py b/nodetool_test.py
index b00e442..e913b30 100644
--- a/nodetool_test.py
+++ b/nodetool_test.py
@@ -1,12 +1,19 @@
 import os
+import pytest
+import re
+import logging
+
 from cassandra import ConsistencyLevel
 from cassandra.query import SimpleStatement
 from ccmlib.node import ToolError
-from dtest import Tester, debug, create_ks
+
+from dtest import Tester, create_ks
 from tools.assertions import assert_all, assert_invalid, assert_none
-from tools.decorators import since
 from tools.jmxutils import JolokiaAgent, make_mbean, remove_perf_disable_shared_mem
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
 
 class TestNodetool(Tester):
 
@@ -26,10 +33,10 @@ class TestNodetool(Tester):
 
         try:
             node.decommission()
-            self.assertFalse("Expected nodetool error")
+            assert not "Expected nodetool error"
         except ToolError as e:
-            self.assertEqual('', e.stderr)
-            self.assertTrue('Unsupported operation' in e.stdout)
+            assert '' == e.stderr
+            assert 'Unsupported operation' in e.stdout
 
     def test_correct_dc_rack_in_nodetool_info(self):
         """
@@ -51,16 +58,19 @@ class TestNodetool(Tester):
 
         for i, node in enumerate(cluster.nodelist()):
             out, err, _ = node.nodetool('info')
-            self.assertEqual(0, len(err), err)
-            debug(out)
-            for line in out.split(os.linesep):
+            assert 0 == len(err), err
+            out_str = out
+            if isinstance(out, (bytes, bytearray)):
+                out_str = out.decode("utf-8")
+            logger.debug(out_str)
+            for line in out_str.split(os.linesep):
                 if line.startswith('Data Center'):
-                    self.assertTrue(line.endswith(node.data_center),
-                                    "Expected dc {} for {} but got {}".format(node.data_center, node.address(), line.rsplit(None, 1)[-1]))
+                    assert line.endswith(node.data_center), \
+                        "Expected dc {} for {} but got {}".format(node.data_center, node.address(), line.rsplit(None, 1)[-1])
                 elif line.startswith('Rack'):
                     rack = "rack{}".format(i % 2)
-                    self.assertTrue(line.endswith(rack),
-                                    "Expected rack {} for {} but got {}".format(rack, node.address(), line.rsplit(None, 1)[-1]))
+                    assert line.endswith(rack), \
+                        "Expected rack {} for {} but got {}".format(rack, node.address(), line.rsplit(None, 1)[-1])
 
     @since('3.4')
     def test_nodetool_timeout_commands(self):
@@ -81,21 +91,21 @@ class TestNodetool(Tester):
         # read all of the timeouts, make sure we get a sane response
         for timeout_type in types:
             out, err, _ = node.nodetool('gettimeout {}'.format(timeout_type))
-            self.assertEqual(0, len(err), err)
-            debug(out)
-            self.assertRegexpMatches(out, r'.* \d+ ms')
+            assert 0 == len(err), err
+            logger.debug(out)
+            assert re.search(r'.* \d+ ms', out)
 
         # set all of the timeouts to 123
         for timeout_type in types:
             _, err, _ = node.nodetool('settimeout {} 123'.format(timeout_type))
-            self.assertEqual(0, len(err), err)
+            assert 0 == len(err), err
 
         # verify that they're all reported as 123
         for timeout_type in types:
             out, err, _ = node.nodetool('gettimeout {}'.format(timeout_type))
-            self.assertEqual(0, len(err), err)
-            debug(out)
-            self.assertRegexpMatches(out, r'.* 123 ms')
+            assert 0 == len(err), err
+            logger.debug(out)
+            assert re.search(r'.* 123 ms', out)
 
     @since('3.0')
     def test_cleanup_when_no_replica_with_index(self):
@@ -132,9 +142,9 @@ class TestNodetool(Tester):
         self.cluster.flush()
 
         for node in self.cluster.nodelist():
-            self.assertNotEqual(0, len(node.get_sstables('ks', 'cf')))
+            assert 0 != len(node.get_sstables('ks', 'cf'))
         if with_index:
-            self.assertEqual(len(list(session_dc2.execute("SELECT * FROM ks.cf WHERE value = 'value'"))), 100)
+            assert 100 == len(list(session_dc2.execute("SELECT * FROM ks.cf WHERE value = 'value'")))
 
         # alter rf to only dc1
         session.execute("ALTER KEYSPACE ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'dc1' : 1, 'dc2' : 0};")
@@ -146,16 +156,16 @@ class TestNodetool(Tester):
         # check local data on dc2
         for node in self.cluster.nodelist():
             if node.data_center == 'dc2':
-                self.assertEqual(0, len(node.get_sstables('ks', 'cf')))
+                assert 0 == len(node.get_sstables('ks', 'cf'))
             else:
-                self.assertNotEqual(0, len(node.get_sstables('ks', 'cf')))
+                assert 0 != len(node.get_sstables('ks', 'cf'))
 
         # dc1 data remains
         statement = SimpleStatement("SELECT * FROM ks.cf", consistency_level=ConsistencyLevel.LOCAL_ONE)
-        self.assertEqual(len(list(session.execute(statement))), 100)
+        assert 100 == len(list(session.execute(statement)))
         if with_index:
             statement = SimpleStatement("SELECT * FROM ks.cf WHERE value = 'value'", consistency_level=ConsistencyLevel.LOCAL_ONE)
-            self.assertEqual(len(list(session.execute(statement))), 100)
+            assert len(list(session.execute(statement))) == 100
 
         # alter rf back to query dc2, no data, no index
         session.execute("ALTER KEYSPACE ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'dc1' : 0, 'dc2' : 1};")
@@ -182,30 +192,30 @@ class TestNodetool(Tester):
 
         # Do a first try without any keypace, we shouldn't have the notice
         out, err, _ = node.nodetool('status')
-        self.assertEqual(0, len(err), err)
-        self.assertNotRegexpMatches(out, notice_message)
+        assert 0 == len(err), err
+        assert not re.search(notice_message, out)
 
         session = self.patient_cql_connection(node)
         session.execute("CREATE KEYSPACE ks1 WITH replication = { 'class':'SimpleStrategy', 'replication_factor':1}")
 
         # With 1 keyspace, we should still not get the notice
         out, err, _ = node.nodetool('status')
-        self.assertEqual(0, len(err), err)
-        self.assertNotRegexpMatches(out, notice_message)
+        assert 0 == len(err), err
+        assert not re.search(notice_message, out)
 
         session.execute("CREATE KEYSPACE ks2 WITH replication = { 'class':'SimpleStrategy', 'replication_factor':1}")
 
         # With 2 keyspaces with the same settings, we should not get the notice
         out, err, _ = node.nodetool('status')
-        self.assertEqual(0, len(err), err)
-        self.assertNotRegexpMatches(out, notice_message)
+        assert 0 == len(err), err
+        assert not re.search(notice_message, out)
 
         session.execute("CREATE KEYSPACE ks3 WITH replication = { 'class':'SimpleStrategy', 'replication_factor':3}")
 
         # With a keyspace without the same replication factor, we should get the notice
         out, err, _ = node.nodetool('status')
-        self.assertEqual(0, len(err), err)
-        self.assertRegexpMatches(out, notice_message)
+        assert 0 == len(err), err
+        assert re.search(notice_message, out)
 
     @since('4.0')
     def test_set_get_batchlog_replay_throttle(self):
@@ -220,14 +230,14 @@ class TestNodetool(Tester):
         cluster.start()
 
         # Test that nodetool help messages are displayed
-        self.assertTrue('Set batchlog replay throttle' in node.nodetool('help setbatchlogreplaythrottle').stdout)
-        self.assertTrue('Print batchlog replay throttle' in node.nodetool('help getbatchlogreplaythrottle').stdout)
+        assert 'Set batchlog replay throttle' in node.nodetool('help setbatchlogreplaythrottle').stdout
+        assert 'Print batchlog replay throttle' in node.nodetool('help getbatchlogreplaythrottle').stdout
 
         # Set and get throttle with nodetool, ensuring that the rate change is logged
         node.nodetool('setbatchlogreplaythrottle 2048')
-        self.assertTrue(len(node.grep_log('Updating batchlog replay throttle to 2048 KB/s, 1024 KB/s per endpoint',
-                                          filename='debug.log')) > 0)
-        self.assertTrue('Batchlog replay throttle: 2048 KB/s' in node.nodetool('getbatchlogreplaythrottle').stdout)
+        assert len(node.grep_log('Updating batchlog replay throttle to 2048 KB/s, 1024 KB/s per endpoint',
+                                 filename='debug.log')) > 0
+        assert 'Batchlog replay throttle: 2048 KB/s' in node.nodetool('getbatchlogreplaythrottle').stdout
 
     @since('3.0')
     def test_reloadlocalschema(self):
@@ -244,7 +254,8 @@ class TestNodetool(Tester):
 
         session = self.patient_cql_connection(node)
 
-        query = "CREATE KEYSPACE IF NOT EXISTS test WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2};"
+        query = "CREATE KEYSPACE IF NOT EXISTS test WITH replication " \
+                "= {'class': 'NetworkTopologyStrategy', 'datacenter1': 2};"
         session.execute(query)
 
         query = 'CREATE TABLE test.test (pk int, ck int, PRIMARY KEY (pk, ck));'
@@ -252,8 +263,6 @@ class TestNodetool(Tester):
 
         ss = make_mbean('db', type='StorageService')
 
-        schema_version = ''
-
         # get initial schema version
         with JolokiaAgent(node) as jmx:
             schema_version = jmx.read_attribute(ss, 'SchemaVersion')
@@ -270,7 +279,7 @@ class TestNodetool(Tester):
 
         # validate that schema version wasn't automatically updated
         with JolokiaAgent(node) as jmx:
-            self.assertEqual(schema_version, jmx.read_attribute(ss, 'SchemaVersion'))
+            assert schema_version == jmx.read_attribute(ss, 'SchemaVersion')
 
         # make sure the new column wasn't automagically picked up
         assert_invalid(session, 'INSERT INTO test.test (pk, ck, val) VALUES (0, 1, 2);')
@@ -280,7 +289,7 @@ class TestNodetool(Tester):
 
         # validate that schema version changed
         with JolokiaAgent(node) as jmx:
-            self.assertNotEqual(schema_version, jmx.read_attribute(ss, 'SchemaVersion'))
+            assert schema_version != jmx.read_attribute(ss, 'SchemaVersion')
 
         # try an insert with the new column again and validate it succeeds this time
         session.execute('INSERT INTO test.test (pk, ck, val) VALUES (0, 1, 2);')
@@ -299,19 +308,19 @@ class TestNodetool(Tester):
         cluster.start()
 
         # Test that nodetool help messages are displayed
-        self.assertTrue('Set the number of concurrent view' in node.nodetool('help setconcurrentviewbuilders').stdout)
-        self.assertTrue('Get the number of concurrent view' in node.nodetool('help getconcurrentviewbuilders').stdout)
+        assert 'Set the number of concurrent view' in node.nodetool('help setconcurrentviewbuilders').stdout
+        assert 'Get the number of concurrent view' in node.nodetool('help getconcurrentviewbuilders').stdout
 
         # Set and get throttle with nodetool, ensuring that the rate change is logged
         node.nodetool('setconcurrentviewbuilders 4')
-        self.assertTrue('Current number of concurrent view builders in the system is: \n4'
-                        in node.nodetool('getconcurrentviewbuilders').stdout)
+        assert 'Current number of concurrent view builders in the system is: \n4' \
+               in node.nodetool('getconcurrentviewbuilders').stdout
 
         # Try to set an invalid zero value
         try:
             node.nodetool('setconcurrentviewbuilders 0')
         except ToolError as e:
-            self.assertTrue('concurrent_view_builders should be great than 0.' in e.stdout)
-            self.assertTrue('Number of concurrent view builders should be greater than 0.', e.message)
+            assert 'concurrent_view_builders should be great than 0.' in e.stdout
+            assert 'Number of concurrent view builders should be greater than 0.' in str(e)
         else:
             self.fail("Expected error when setting and invalid value")

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/offline_tools_test.py
----------------------------------------------------------------------
diff --git a/offline_tools_test.py b/offline_tools_test.py
index 028027d..7c7cc8f 100644
--- a/offline_tools_test.py
+++ b/offline_tools_test.py
@@ -3,21 +3,29 @@ import os
 import random
 import re
 import subprocess
+import pytest
+import logging
 
 from ccmlib import common
 from ccmlib.node import ToolError
 
-from dtest import Tester, debug, create_ks
-from tools.decorators import since
+from dtest import Tester, create_ks
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 class TestOfflineTools(Tester):
 
-    # In 2.0, we will get this error log message due to jamm not being
-    # in the classpath
-    ignore_log_patterns = ["Unable to initialize MemoryMeter"]
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        fixture_dtest_setup.ignore_log_patterns = (
+            # In 2.0, we will get this error log message due to jamm not being
+            # in the classpath
+            "Unable to initialize MemoryMeter"
+        )
 
-    def sstablelevelreset_test(self):
+    def test_sstablelevelreset(self):
         """
         Insert data and call sstablelevelreset on a series of
         tables. Confirm level is reset to 0 using its output.
@@ -34,9 +42,9 @@ class TestOfflineTools(Tester):
         try:
             node1.run_sstablelevelreset("keyspace1", "standard1")
         except ToolError as e:
-            self.assertIn("ColumnFamily not found: keyspace1/standard1", e.message)
+            assert re.search("ColumnFamily not found: keyspace1/standard1", str(e))
             # this should return exit code 1
-            self.assertEqual(e.exit_status, 1, "Expected sstablelevelreset to have a return code of 1, but instead return code was {}".format(e.exit_status))
+            assert e.exit_status == 1, "Expected sstablelevelreset to have a return code of 1 == but instead return code was {}".format(e.exit_status)
 
         # now test by generating keyspace but not flushing sstables
         cluster.start(wait_for_binary_proto=True)
@@ -46,8 +54,8 @@ class TestOfflineTools(Tester):
 
         output, error, rc = node1.run_sstablelevelreset("keyspace1", "standard1")
         self._check_stderr_error(error)
-        self.assertIn("Found no sstables, did you give the correct keyspace", output)
-        self.assertEqual(rc, 0, msg=str(rc))
+        assert re.search("Found no sstables, did you give the correct keyspace", output.decode("utf-8"))
+        assert rc == 0, str(rc)
 
         # test by writing small amount of data and flushing (all sstables should be level 0)
         cluster.start(wait_for_binary_proto=True)
@@ -59,9 +67,9 @@ class TestOfflineTools(Tester):
         cluster.stop(gently=False)
 
         output, error, rc = node1.run_sstablelevelreset("keyspace1", "standard1")
-        self._check_stderr_error(error)
-        self.assertIn("since it is already on level 0", output)
-        self.assertEqual(rc, 0, msg=str(rc))
+        self._check_stderr_error(error.decode("utf-8"))
+        assert re.search("since it is already on level 0", output.decode("utf-8"))
+        assert rc == 0, str(rc)
 
         # test by loading large amount data so we have multiple levels and checking all levels are 0 at end
         cluster.start(wait_for_binary_proto=True)
@@ -74,21 +82,21 @@ class TestOfflineTools(Tester):
         initial_levels = self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"]))
         _, error, rc = node1.run_sstablelevelreset("keyspace1", "standard1")
         final_levels = self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"]))
-        self._check_stderr_error(error)
-        self.assertEqual(rc, 0, msg=str(rc))
+        self._check_stderr_error(error.decode("utf-8"))
+        assert rc == 0, str(rc)
 
-        debug(initial_levels)
-        debug(final_levels)
+        logger.debug(initial_levels)
+        logger.debug(final_levels)
 
         # let's make sure there was at least L1 beforing resetting levels
-        self.assertTrue(max(initial_levels) > 0)
+        assert max(initial_levels) > 0
 
         # let's check all sstables are on L0 after sstablelevelreset
-        self.assertTrue(max(final_levels) == 0)
+        assert max(final_levels) == 0
 
     def get_levels(self, data):
         (out, err, rc) = data
-        return map(int, re.findall("SSTable Level: ([0-9])", out))
+        return list(map(int, re.findall("SSTable Level: ([0-9])", out.decode("utf-8"))))
 
     def wait_for_compactions(self, node):
         pattern = re.compile("pending tasks: 0")
@@ -97,7 +105,7 @@ class TestOfflineTools(Tester):
             if pattern.search(output):
                 break
 
-    def sstableofflinerelevel_test(self):
+    def test_sstableofflinerelevel(self):
         """
         Generate sstables of varying levels.
         Reset sstables to L0 with sstablelevelreset
@@ -115,9 +123,9 @@ class TestOfflineTools(Tester):
         # test by trying to run on nonexistent keyspace
         # cluster.stop(gently=False)
         # output, error, rc = node1.run_sstableofflinerelevel("keyspace1", "standard1", output=True)
-        # self.assertTrue("java.lang.IllegalArgumentException: Unknown keyspace/columnFamily keyspace1.standard1" in error)
+        # assert "java.lang.IllegalArgumentException: Unknown keyspace/columnFamily keyspace1.standard1" in error
         # # this should return exit code 1
-        # self.assertEqual(rc, 1, msg=str(rc))
+        # assert rc == 1, str(rc)
         # cluster.start()
 
         # now test by generating keyspace but not flushing sstables
@@ -131,13 +139,13 @@ class TestOfflineTools(Tester):
         try:
             output, error, _ = node1.run_sstableofflinerelevel("keyspace1", "standard1")
         except ToolError as e:
-            self.assertIn("No sstables to relevel for keyspace1.standard1", e.stdout)
-            self.assertEqual(e.exit_status, 1, msg=str(e.exit_status))
+            assert re.search("No sstables to relevel for keyspace1.standard1", e.stdout.decode("utf-8"))
+            assert e.exit_status == 1, str(e.exit_status)
 
         # test by flushing (sstable should be level 0)
         cluster.start(wait_for_binary_proto=True)
         session = self.patient_cql_connection(node1)
-        debug("Altering compaction strategy to LCS")
+        logger.debug("Altering compaction strategy to LCS")
         session.execute("ALTER TABLE keyspace1.standard1 with compaction={'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb':1};")
 
         node1.stress(['write', 'n=1K', 'no-warmup',
@@ -149,8 +157,8 @@ class TestOfflineTools(Tester):
         cluster.stop()
 
         output, _, rc = node1.run_sstableofflinerelevel("keyspace1", "standard1")
-        self.assertIn("L0=1", output)
-        self.assertEqual(rc, 0, msg=str(rc))
+        assert re.search("L0=1", output.decode("utf-8"))
+        assert rc == 0, str(rc)
 
         cluster.start(wait_for_binary_proto=True)
         # test by loading large amount data so we have multiple sstables
@@ -162,56 +170,55 @@ class TestOfflineTools(Tester):
                       '-rate', 'threads=8'])
 
         node1.flush()
-        debug("Waiting for compactions to finish")
+        logger.debug("Waiting for compactions to finish")
         self.wait_for_compactions(node1)
-        debug("Stopping node")
+        logger.debug("Stopping node")
         cluster.stop()
-        debug("Done stopping node")
+        logger.debug("Done stopping node")
 
         # Let's reset all sstables to L0
-        debug("Getting initial levels")
+        logger.debug("Getting initial levels")
         initial_levels = list(self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"])))
-        self.assertNotEqual([], initial_levels)
-        debug('initial_levels:')
-        debug(initial_levels)
-        debug("Running sstablelevelreset")
+        assert [] != initial_levels
+        logger.debug('initial_levels:')
+        logger.debug(initial_levels)
+        logger.debug("Running sstablelevelreset")
         node1.run_sstablelevelreset("keyspace1", "standard1")
-        debug("Getting final levels")
+        logger.debug("Getting final levels")
         final_levels = list(self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"])))
-        self.assertNotEqual([], final_levels)
-        debug('final levels:')
-        debug(final_levels)
+        assert [] != final_levels
+        logger.debug('final levels:')
+        logger.debug(final_levels)
 
         # let's make sure there was at least 3 levels (L0, L1 and L2)
-        self.assertGreater(max(initial_levels), 1)
+        assert max(initial_levels) > 1
         # let's check all sstables are on L0 after sstablelevelreset
-        self.assertEqual(max(final_levels), 0)
+        assert max(final_levels) == 0
 
         # time to relevel sstables
-        debug("Getting initial levels")
+        logger.debug("Getting initial levels")
         initial_levels = self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"]))
-        debug("Running sstableofflinerelevel")
+        logger.debug("Running sstableofflinerelevel")
         output, error, _ = node1.run_sstableofflinerelevel("keyspace1", "standard1")
-        debug("Getting final levels")
+        logger.debug("Getting final levels")
         final_levels = self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"]))
 
-        debug(output)
-        debug(error)
+        logger.debug(output)
+        logger.debug(error)
 
-        debug(initial_levels)
-        debug(final_levels)
+        logger.debug(initial_levels)
+        logger.debug(final_levels)
 
         # let's check sstables were promoted after releveling
-        self.assertGreater(max(final_levels), 1)
+        assert max(final_levels) > 1
 
     @since('2.2')
-    def sstableverify_test(self):
+    def test_sstableverify(self):
         """
         Generate sstables and test offline verification works correctly
         Test on bad input: nonexistent keyspace and sstables
         Test on potential situations: deleted sstables, corrupted sstables
         """
-
         cluster = self.cluster
         cluster.populate(3).start(wait_for_binary_proto=True)
         node1, node2, node3 = cluster.nodelist()
@@ -220,14 +227,14 @@ class TestOfflineTools(Tester):
         try:
             (out, err, rc) = node1.run_sstableverify("keyspace1", "standard1")
         except ToolError as e:
-            self.assertIn("Unknown keyspace/table keyspace1.standard1", e.message)
-            self.assertEqual(e.exit_status, 1, msg=str(e.exit_status))
+            assert "Unknown keyspace/table keyspace1.standard1" in repr(e)
+            assert e.exit_status == 1, str(e.exit_status)
 
         # test on nonexistent sstables:
         node1.stress(['write', 'n=100', 'no-warmup', '-schema', 'replication(factor=3)',
                       '-rate', 'threads=8'])
         (out, err, rc) = node1.run_sstableverify("keyspace1", "standard1")
-        self.assertEqual(rc, 0, msg=str(rc))
+        assert rc == 0, str(rc)
 
         # Generate multiple sstables and test works properly in the simple case
         node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)',
@@ -240,15 +247,14 @@ class TestOfflineTools(Tester):
 
         (out, error, rc) = node1.run_sstableverify("keyspace1", "standard1")
 
-        self.assertEqual(rc, 0, msg=str(rc))
+        assert rc == 0, str(rc)
 
         # STDOUT of the sstableverify command consists of multiple lines which may contain
         # Java-normalized paths. To later compare these with Python-normalized paths, we
         # map over each line of out and replace Java-normalized paths with Python equivalents.
-        outlines = map(lambda line: re.sub("(?<=path=').*(?=')",
+        outlines = [re.sub("(?<=path=').*(?=')",
                                            lambda match: os.path.normcase(match.group(0)),
-                                           line),
-                       out.splitlines())
+                                           line) for line in out.decode("utf-8").splitlines()]
 
         # check output is correct for each sstable
         sstables = self._get_final_sstables(node1, "keyspace1", "standard1")
@@ -263,18 +269,18 @@ class TestOfflineTools(Tester):
                     elif "Checking computed hash of BigTableReader" in line:
                         hashcomputed = True
                     else:
-                        debug(line)
+                        logger.debug(line)
 
-            debug(verified)
-            debug(hashcomputed)
-            debug(sstable)
-            self.assertTrue(verified and hashcomputed)
+            logger.debug(verified)
+            logger.debug(hashcomputed)
+            logger.debug(sstable)
+            assert verified and hashcomputed
 
         # now try intentionally corrupting an sstable to see if hash computed is different and error recognized
         sstable1 = sstables[1]
-        with open(sstable1, 'r') as f:
+        with open(sstable1, 'rb') as f:
             sstabledata = bytearray(f.read())
-        with open(sstable1, 'w') as out:
+        with open(sstable1, 'wb') as out:
             position = random.randrange(0, len(sstabledata))
             sstabledata[position] = (sstabledata[position] + 1) % 256
             out.write(sstabledata)
@@ -284,12 +290,12 @@ class TestOfflineTools(Tester):
             (out, error, rc) = node1.run_sstableverify("keyspace1", "standard1", options=['-v'])
         except ToolError as e:
             # Process sstableverify output to normalize paths in string to Python casing as above
-            error = re.sub("(?<=Corrupted: ).*", lambda match: os.path.normcase(match.group(0)), e.message)
+            error = re.sub("(?<=Corrupted: ).*", lambda match: os.path.normcase(match.group(0)), str(e))
 
-            self.assertIn("Corrupted: " + sstable1, error)
-            self.assertEqual(e.exit_status, 1, msg=str(e.exit_status))
+            assert "Corrupted: " + sstable1 in error
+            assert e.exit_status == 1, str(e.exit_status)
 
-    def sstableexpiredblockers_test(self):
+    def test_sstableexpiredblockers(self):
         cluster = self.cluster
         cluster.populate(1).start(wait_for_binary_proto=True)
         [node1] = cluster.nodelist()
@@ -304,14 +310,14 @@ class TestOfflineTools(Tester):
         session.execute("delete from ks.cf where key = 3")
         node1.flush()
         out, error, _ = node1.run_sstableexpiredblockers(keyspace="ks", column_family="cf")
-        self.assertIn("blocks 2 expired sstables from getting dropped", out)
+        assert "blocks 2 expired sstables from getting dropped" in out.decode("utf-8")
 
     # 4.0 removes back compatibility with pre-3.0 versions, so testing upgradesstables for
     # paths from those versions to 4.0 is invalid (and can only fail). There isn't currently
     # any difference between the 3.0 and 4.0 sstable format though, but when the version is
     # bumped for 4.0, remove the max_version & add a case for testing a 3.0 -> 4.0 upgrade
     @since('2.2', max_version='3.X')
-    def sstableupgrade_test(self):
+    def test_sstableupgrade(self):
         """
         Test that sstableupgrade functions properly offline on a same-version Cassandra sstable, a
         stdout message of "Found 0 sstables that need upgrading." should be returned.
@@ -320,7 +326,7 @@ class TestOfflineTools(Tester):
         cluster = self.cluster
         testversion = cluster.version()
         original_install_dir = cluster.get_install_dir()
-        debug('Original install dir: {}'.format(original_install_dir))
+        logger.debug('Original install dir: {}'.format(original_install_dir))
 
         # Set up last major version to upgrade from, assuming 2.1 branch is the oldest tested version
         if testversion < '2.2':
@@ -330,36 +336,36 @@ class TestOfflineTools(Tester):
             #   Error opening zip file or JAR manifest missing : /home/mshuler/git/cassandra/lib/jamm-0.2.5.jar
             # The 2.1 installed jamm version is 0.3.0, but bin/cassandra.in.sh used by nodetool still has 0.2.5
             # (when this is fixed in CCM issue #463, install version='github:apache/cassandra-2.0' as below)
-            self.skipTest('Skipping 2.1 test due to jamm.jar version upgrade problem in CCM node configuration.')
+            pytest.skip('Skipping 2.1 test due to jamm.jar version upgrade problem in CCM node configuration.')
         elif testversion < '3.0':
-            debug('Test version: {} - installing github:apache/cassandra-2.1'.format(testversion))
+            logger.debug('Test version: {} - installing github:apache/cassandra-2.1'.format(testversion))
             cluster.set_install_dir(version='github:apache/cassandra-2.1')
         # As of 3.5, sstable format 'ma' from 3.0 is still the latest - install 2.2 to upgrade from
         elif testversion < '4.0':
-            debug('Test version: {} - installing github:apache/cassandra-2.2'.format(testversion))
+            logger.debug('Test version: {} - installing github:apache/cassandra-2.2'.format(testversion))
             cluster.set_install_dir(version='github:apache/cassandra-2.2')
         # From 4.0, one can only upgrade from 3.0
         else:
-            debug('Test version: {} - installing github:apache/cassandra-3.0'.format(testversion))
+            logger.debug('Test version: {} - installing github:apache/cassandra-3.0'.format(testversion))
             cluster.set_install_dir(version='github:apache/cassandra-3.0')
 
         # Start up last major version, write out an sstable to upgrade, and stop node
         cluster.populate(1).start(wait_for_binary_proto=True)
         [node1] = cluster.nodelist()
         # Check that node1 is actually what we expect
-        debug('Downgraded install dir: {}'.format(node1.get_install_dir()))
+        logger.debug('Downgraded install dir: {}'.format(node1.get_install_dir()))
         session = self.patient_cql_connection(node1)
         create_ks(session, 'ks', 1)
         session.execute('create table ks.cf (key int PRIMARY KEY, val int) with gc_grace_seconds=0')
         session.execute('insert into ks.cf (key, val) values (1,1)')
         node1.flush()
         cluster.stop()
-        debug('Beginning ks.cf sstable: {}'.format(node1.get_sstables(keyspace='ks', column_family='cf')))
+        logger.debug('Beginning ks.cf sstable: {}'.format(node1.get_sstables(keyspace='ks', column_family='cf')))
 
         # Upgrade Cassandra to original testversion and run sstableupgrade
         cluster.set_install_dir(original_install_dir)
         # Check that node1 is actually upgraded
-        debug('Upgraded to original install dir: {}'.format(node1.get_install_dir()))
+        logger.debug('Upgraded to original install dir: {}'.format(node1.get_install_dir()))
         # Perform a node start/stop so system tables get internally updated, otherwise we may get "Unknown keyspace/table ks.cf"
         cluster.start(wait_for_binary_proto=True)
         node1.flush()
@@ -372,19 +378,19 @@ class TestOfflineTools(Tester):
         # change before it's release.
         if testversion < '4.0':
             (out, error, rc) = node1.run_sstableupgrade(keyspace='ks', column_family='cf')
-            debug(out)
-            debug(error)
-            debug('Upgraded ks.cf sstable: {}'.format(node1.get_sstables(keyspace='ks', column_family='cf')))
-            self.assertIn('Found 1 sstables that need upgrading.', out)
+            logger.debug(out)
+            logger.debug(error)
+            logger.debug('Upgraded ks.cf sstable: {}'.format(node1.get_sstables(keyspace='ks', column_family='cf')))
+            assert 'Found 1 sstables that need upgrading.' in out
 
         # Check that sstableupgrade finds no upgrade needed on current version.
         (out, error, rc) = node1.run_sstableupgrade(keyspace='ks', column_family='cf')
-        debug(out)
-        debug(error)
-        self.assertIn('Found 0 sstables that need upgrading.', out)
+        logger.debug(out)
+        logger.debug(error)
+        assert 'Found 0 sstables that need upgrading.' in out
 
     @since('3.0')
-    def sstabledump_test(self):
+    def test_sstabledump(self):
         """
         Test that sstabledump functions properly offline to output the contents of a table.
         """
@@ -405,41 +411,41 @@ class TestOfflineTools(Tester):
         node1.flush()
         cluster.stop()
         [(out, error, rc)] = node1.run_sstabledump(keyspace='ks', column_families=['cf'])
-        debug(out)
-        debug(error)
+        logger.debug(out)
+        logger.debug(error)
 
         # Load the json output and check that it contains the inserted key=1
         s = json.loads(out)
-        debug(s)
-        self.assertEqual(len(s), 2)
+        logger.debug(s)
+        assert len(s) == 2
 
         # order the rows so that we have key=1 first, then key=2
         row0, row1 = s
         (row0, row1) = (row0, row1) if row0['partition']['key'] == ['1'] else (row1, row0)
 
-        self.assertEqual(row0['partition']['key'], ['1'])
+        assert row0['partition']['key'] == ['1']
 
-        self.assertEqual(row1['partition']['key'], ['2'])
-        self.assertIsNotNone(row1['partition'].get('deletion_info'))
-        self.assertIsNotNone(row1.get('rows'))
+        assert row1['partition']['key'] == ['2']
+        assert row1['partition'].get('deletion_info') is not None
+        assert row1.get('rows') is not None
 
         # Check that we only get the key back using the enumerate option
         [(out, error, rc)] = node1.run_sstabledump(keyspace='ks', column_families=['cf'], enumerate_keys=True)
-        debug(out)
-        debug(error)
+        logger.debug(out)
+        logger.debug(error)
         s = json.loads(out)
-        debug(s)
-        self.assertEqual(len(s), 2)
+        logger.debug(s)
+        assert len(s) == 2
         dumped_keys = set(row[0] for row in s)
-        self.assertEqual(set(['1', '2']), dumped_keys)
+        assert {'1', '2'} == dumped_keys
 
     def _check_stderr_error(self, error):
         acceptable = ["Max sstable size of", "Consider adding more capacity", "JNA link failure", "Class JavaLaunchHelper is implemented in both"]
 
         if len(error) > 0:
             for line in error.splitlines():
-                self.assertTrue(any([msg in line for msg in acceptable]),
-                                'Found line \n\n"{line}"\n\n in error\n\n{error}'.format(line=line, error=error))
+                assert any([msg in line for msg in acceptable]), \
+                    'Found line \n\n"{line}"\n\n in error\n\n{error}'.format(line=line, error=error)
 
     def _get_final_sstables(self, node, ks, table):
         """
@@ -448,7 +454,7 @@ class TestOfflineTools(Tester):
         file names no longer contain tmp in their names (CASSANDRA-7066).
         """
         # Get all sstable data files
-        allsstables = map(os.path.normcase, node.get_sstables(ks, table))
+        allsstables = list(map(os.path.normcase, node.get_sstables(ks, table)))
 
         # Remove any temporary files
         tool_bin = node.get_tool('sstableutil')
@@ -457,7 +463,7 @@ class TestOfflineTools(Tester):
             env = common.make_cassandra_env(node.get_install_cassandra_root(), node.get_node_cassandra_root())
             p = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
             (stdout, stderr) = p.communicate()
-            tmpsstables = map(os.path.normcase, stdout.splitlines())
+            tmpsstables = list(map(os.path.normcase, stdout.decode("utf-8").splitlines()))
 
             ret = list(set(allsstables) - set(tmpsstables))
         else:
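
Since Python 3's map() and filter() return one-shot iterators rather than
lists, the migration wraps them in list() wherever the result is reused or
measured, as in _get_final_sstables above. A minimal sketch (the paths are
hypothetical):

    import os

    paths = ['/TMP/Data.DB', '/TMP/Index.DB']   # hypothetical sstable paths
    normalized = map(os.path.normcase, paths)   # an iterator in Python 3, not a list

    as_list = list(normalized)                  # materialize before reuse
    assert len(as_list) == 2                    # len() on the iterator itself would raise TypeError
    assert list(normalized) == []               # the iterator is already exhausted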




[06/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/token_generator_test.py
----------------------------------------------------------------------
diff --git a/token_generator_test.py b/token_generator_test.py
index c4846e6..b6e9025 100644
--- a/token_generator_test.py
+++ b/token_generator_test.py
@@ -1,15 +1,18 @@
-# coding: utf-8
 import os
 import subprocess
 import time
-
+import pytest
 import parse
+import logging
+
 from cassandra.util import sortedset
 from ccmlib import common
 
-from dtest import DISABLE_VNODES, Tester, debug
+from dtest import Tester
 from tools.data import rows_to_list
-from tools.decorators import since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 @since('2.0.16', max_version='3.0.0')
@@ -36,7 +39,7 @@ class TestTokenGenerator(Tester):
         for n in nodes:
             args.append(str(n))
 
-        debug('Invoking {}'.format(args))
+        logger.debug('Invoking {}'.format(args))
-        token_gen_output = subprocess.check_output(args)
+        token_gen_output = subprocess.check_output(args).decode("utf-8")
         lines = token_gen_output.split("\n")
         dc_tokens = None
@@ -44,19 +47,19 @@ class TestTokenGenerator(Tester):
         for line in lines:
             if line.startswith("DC #"):
                 if dc_tokens is not None:
-                    self.assertGreater(dc_tokens.__len__(), 0, "dc_tokens is empty from token-generator {}".format(args))
+                    assert len(dc_tokens) > 0, "dc_tokens is empty from token-generator {}".format(args)
                     generated_tokens.append(dc_tokens)
                 dc_tokens = []
             else:
                 if line:
                     m = parse.search('Node #{node_num:d}:{:s}{node_token:d}', line)
-                    self.assertIsNotNone(m, "Line \"{}\" does not match pattern from token-generator {}".format(line, args))
+                    assert m is not None, "Line \"{}\" does not match pattern from token-generator {}".format(line, args)
                     node_num = int(m.named['node_num'])
                     node_token = int(m.named['node_token'])
                     dc_tokens.append(node_token)
-                    self.assertEqual(node_num, dc_tokens.__len__(), "invalid token count from token-generator {}".format(args))
-        self.assertIsNotNone(dc_tokens, "No tokens from token-generator {}".format(args))
-        self.assertGreater(dc_tokens.__len__(), 0, "No tokens from token-generator {}".format(args))
+                    assert node_num == len(dc_tokens), "invalid token count from token-generator {}".format(args)
+        assert dc_tokens is not None, "No tokens from token-generator {}".format(args)
+        assert len(dc_tokens) > 0, "No tokens from token-generator {}".format(args)
         generated_tokens.append(dc_tokens)
 
         return generated_tokens
@@ -77,10 +80,10 @@ class TestTokenGenerator(Tester):
 
         # remove these from cluster options - otherwise node's config would be overridden with cluster._config_options_
         cluster._config_options.__delitem__('num_tokens')
-        if not DISABLE_VNODES:
+        if self.dtest_config.use_vnodes:
             cluster._config_options.__delitem__('initial_token')
 
-        self.assertTrue(not cluster.nodelist(), "nodelist() already initialized")
+        assert not cluster.nodelist(), "nodelist() already initialized"
         cluster.populate(nodes, use_vnodes=False, tokens=generated_tokens[0]).start(wait_for_binary_proto=True)
         time.sleep(0.2)
 
@@ -95,22 +98,22 @@ class TestTokenGenerator(Tester):
 
         tokens = []
         local_tokens = rows_to_list(session.execute("SELECT tokens FROM system.local"))[0]
-        self.assertEqual(local_tokens.__len__(), 1, "too many tokens for peer")
+        assert len(local_tokens) == 1, "too many tokens for peer"
         for tok in local_tokens:
             tokens += tok
 
         rows = rows_to_list(session.execute("SELECT tokens FROM system.peers"))
-        self.assertEqual(rows.__len__(), nodes - 1)
+        assert len(rows) == nodes - 1
         for row in rows:
             peer_tokens = row[0]
-            self.assertEqual(peer_tokens.__len__(), 1, "too many tokens for peer")
+            assert len(peer_tokens) == 1, "too many tokens for peer"
             for tok in peer_tokens:
                 tokens.append(tok)
 
-        self.assertEqual(tokens.__len__(), dc_tokens.__len__())
+        assert len(tokens) == len(dc_tokens)
         for cluster_token in tokens:
             tok = int(cluster_token)
-            self.assertGreaterEqual(dc_tokens.index(tok), 0, "token in cluster does not match generated tokens")
+            assert dc_tokens.index(tok) >= 0, "token in cluster does not match generated tokens"
 
     def token_gen_def_test(self, nodes=3):
         """ Validate token-generator with Murmur3Partitioner with default token-generator behavior """
@@ -148,23 +151,23 @@ class TestTokenGenerator(Tester):
             all_tokens = sortedset()
             node_count = 0
             generated_tokens = self.call_token_generator(self.cluster.get_install_dir(), random, dc_nodes)
-            self.assertEqual(dc_nodes.__len__(), generated_tokens.__len__())
+            assert len(dc_nodes) == len(generated_tokens)
             for n in range(0, dc_nodes.__len__()):
                 nodes = dc_nodes[n]
                 node_count += nodes
                 tokens = generated_tokens[n]
-                self.assertEqual(nodes, tokens.__len__())
+                assert nodes == len(tokens)
                 for tok in tokens:
-                    self.assertTrue(t_min <= tok < t_max, "Generated token %r out of Murmur3Partitioner range %r..%r" % (tok, t_min, t_max - 1))
-                    self.assertTrue(not all_tokens.__contains__(tok), "Duplicate token %r for nodes-counts %r" % (tok, dc_nodes))
+                    assert t_min <= tok < t_max, "Generated token %r out of Murmur3Partitioner range %r..%r" % (tok, t_min, t_max - 1)
+                    assert tok not in all_tokens, "Duplicate token %r for nodes-counts %r" % (tok, dc_nodes)
                     all_tokens.add(tok)
-            self.assertEqual(all_tokens.__len__(), node_count, "Number of tokens %r and number of nodes %r does not match for %r" % (all_tokens.__len__(), node_count, dc_nodes))
+            assert len(all_tokens) == node_count, "Number of tokens %r and number of nodes %r do not match for %r" % (len(all_tokens), node_count, dc_nodes)
 
-    def multi_dc_tokens_default_test(self):
+    def test_multi_dc_tokens_default(self):
         self._multi_dc_tokens()
 
-    def multi_dc_tokens_murmur3_test(self):
+    def test_multi_dc_tokens_murmur3(self):
         self._multi_dc_tokens(False)
 
-    def multi_dc_tokens_random_test(self):
+    def test_multi_dc_tokens_random(self):
         self._multi_dc_tokens(True)
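
Converting unittest assertions that carry a message is easy to get wrong: the
whole comparison must stay to the left of the comma, with the message alone on
the right. A small sketch of the pattern (the tokens value is a stand-in):

    tokens = [-9223372036854775808, 0, 42]   # stand-in token list

    # unittest:  self.assertGreater(len(tokens), 0, "no tokens generated")
    # plain-assert equivalent:
    assert len(tokens) > 0, "no tokens generated"

    # A mechanical rewrite such as
    #     assert len(tokens), 0 > "no tokens generated"
    # only checks that len(tokens) is truthy, and on failure the message
    # expression 0 > "..." would itself raise TypeError under Python 3.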

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/tools/assertions.py
----------------------------------------------------------------------
diff --git a/tools/assertions.py b/tools/assertions.py
index 2e88067..864a4df 100644
--- a/tools/assertions.py
+++ b/tools/assertions.py
@@ -1,18 +1,16 @@
-
 import re
 from time import sleep
+from tools.misc import list_to_hashed_dict
 
 from cassandra import (InvalidRequest, ReadFailure, ReadTimeout, Unauthorized,
                        Unavailable, WriteFailure, WriteTimeout)
 from cassandra.query import SimpleStatement
-from nose.tools import (assert_equal, assert_false, assert_regexp_matches,
-                        assert_true)
 
 
 """
-The assertion methods in this file are used to structure, execute, and test different queries and scenarios. Use these anytime you are trying
-to check the content of a table, the row count of a table, if a query should raise an exception, etc. These methods handle error messaging
-well, and will help discovering and treating bugs.
+The assertion methods in this file are used to structure, execute, and test different queries and scenarios.
+Use these anytime you are trying to check the content of a table, the row count of a table, if a query should
+raise an exception, etc. These methods handle error messaging well, and will help in discovering and treating bugs.
 
 An example:
 Imagine some table, test:
@@ -57,7 +55,8 @@ def _assert_exception(fun, *args, **kwargs):
             fun(*args)
     except expected as e:
         if matching is not None:
-            assert_regexp_matches(str(e), matching)
+            regex = re.compile(matching)
+            assert regex.search(repr(e)) is not None
     except Exception as e:
         raise e
     else:
@@ -73,13 +72,14 @@ def assert_exception(session, query, matching=None, expected=None):
 
 def assert_unavailable(fun, *args):
     """
-    Attempt to execute a function, and assert Unavailable, WriteTimeout, WriteFailure, ReadTimeout, or ReadFailure exception is raised.
+    Attempt to execute a function, and assert Unavailable, WriteTimeout, WriteFailure,
+    ReadTimeout, or ReadFailure exception is raised.
     @param fun Function to be executed
     @param *args Arguments to be passed to the function
 
     Examples:
     assert_unavailable(session2.execute, "SELECT * FROM ttl_table;")
-    assert_unavailable(lambda c: debug(c.execute(statement)), session)
+    assert_unavailable(lambda c: logger.debug(c.execute(statement)), session)
     """
     _assert_exception(fun, *args, expected=(Unavailable, WriteTimeout, WriteFailure, ReadTimeout, ReadFailure))
 
@@ -106,8 +106,10 @@ def assert_unauthorized(session, query, message):
     @param message Expected error message
 
     Examples:
-    assert_unauthorized(session, "ALTER USER cassandra NOSUPERUSER", "You aren't allowed to alter your own superuser status")
-    assert_unauthorized(cathy, "ALTER TABLE ks.cf ADD val int", "User cathy has no ALTER permission on <table ks.cf> or any of its parents")
+    assert_unauthorized(session, "ALTER USER cassandra NOSUPERUSER",
+                        "You aren't allowed to alter your own superuser status")
+    assert_unauthorized(cathy, "ALTER TABLE ks.cf ADD val int",
+                        "User cathy has no ALTER permission on <table ks.cf> or any of its parents")
     """
     assert_exception(session, query, matching=message, expected=Unauthorized)
 
@@ -165,8 +167,8 @@ def assert_all(session, query, expected, cl=None, ignore_order=False, timeout=No
     res = session.execute(simple_query) if timeout is None else session.execute(simple_query, timeout=timeout)
     list_res = _rows_to_list(res)
     if ignore_order:
-        expected = sorted(expected)
-        list_res = sorted(list_res)
+        expected = list_to_hashed_dict(expected)
+        list_res = list_to_hashed_dict(list_res)
     assert list_res == expected, "Expected {} from {}, but got {}".format(expected, query, list_res)
 
 
@@ -185,16 +187,17 @@ def assert_almost_equal(*args, **kwargs):
     vmax = max(args)
     vmin = min(args)
     error_message = '' if 'error_message' not in kwargs else kwargs['error_message']
-    assert vmin > vmax * (1.0 - error) or vmin == vmax, "values not within {:.2f}% of the max: {} ({})".format(error * 100, args, error_message)
+    assert vmin > vmax * (1.0 - error) or vmin == vmax, \
+        "values not within {:.2f}% of the max: {} ({})".format(error * 100, args, error_message)
 
 
 def assert_row_count(session, table_name, expected, where=None):
     """
     Assert the number of rows in a table matches expected.
-    @params session Session to use
+    @param session Session to use
     @param table_name Name of the table to query
     @param expected Number of rows expected to be in table
-
+    @param where Optional string appended to the CQL SELECT query as a WHERE clause
     Examples:
     assert_row_count(self.session1, 'ttl_table', 1)
     """
@@ -214,6 +217,7 @@ def assert_crc_check_chance_equal(session, table, expected, ks="ks", view=False)
     Assert crc_check_chance equals expected for a given table or view
     @param session Session to use
     @param table Name of the table or view to check
+    @param expected Expected crc_check_chance value that the query result must match
     @param ks Optional Name of the keyspace
     @param view Optional Boolean flag indicating if the table is a view
 
@@ -226,13 +230,13 @@ def assert_crc_check_chance_equal(session, table, expected, ks="ks", view=False)
     """
     if view:
         assert_one(session,
-                   "SELECT crc_check_chance from system_schema.views WHERE keyspace_name = 'ks' AND "
-                   "view_name = '{table}';".format(table=table),
+                   "SELECT crc_check_chance from system_schema.views WHERE keyspace_name = '{keyspace}' AND "
+                   "view_name = '{table}';".format(keyspace=ks, table=table),
                    [expected])
     else:
         assert_one(session,
-                   "SELECT crc_check_chance from system_schema.tables WHERE keyspace_name = 'ks' AND "
-                   "table_name = '{table}';".format(table=table),
+                   "SELECT crc_check_chance from system_schema.tables WHERE keyspace_name = '{keyspace}' AND "
+                   "table_name = '{table}';".format(keyspace=ks, table=table),
                    [expected])
 
 
@@ -245,9 +249,9 @@ def assert_length_equal(object_with_length, expected_length):
     Examples:
     assert_length_equal(res, nb_counter)
     """
-    assert_equal(len(object_with_length), expected_length,
-                 "Expected {} to have length {}, but instead is of length {}".format(object_with_length,
-                                                                                     expected_length, len(object_with_length)))
+    assert len(object_with_length) == expected_length, \
+        "Expected {} to have length {}, but instead is of length {}"\
+        .format(object_with_length, expected_length, len(object_with_length))
 
 
 def assert_not_running(node):
@@ -260,7 +264,7 @@ def assert_not_running(node):
         sleep(1)
         attempts = attempts + 1
 
-    assert_false(node.is_running())
+    assert not node.is_running()
 
 
 def assert_read_timeout_or_failure(session, query):
@@ -281,9 +285,15 @@ def assert_stderr_clean(err, acceptable_errors=None):
                              "Failed to connect over JMX; not collecting these stats"]
 
     regex_str = "^({}|\s*|\n)*$".format("|".join(acceptable_errors))
-    match = re.search(regex_str, err)
+    err_str = err.decode("utf-8").strip()
+    # empty string, as good as we can get for a clean stderr output!
+    if not err_str:
+        return
 
-    assert_true(match, "Attempted to check that stderr was empty. Instead, stderr is {}, but the regex used to check against stderr is {}".format(err, regex_str))
+    match = re.search(regex_str, err_str)
+
+    assert match, "Attempted to check that stderr was empty. Instead, stderr is {}, but the regex used to check " \
+                  "stderr is {}".format(err_str, regex_str)
 
 
 def assert_bootstrap_state(tester, node, expected_bootstrap_state):
@@ -298,3 +308,41 @@ def assert_bootstrap_state(tester, node, expected_bootstrap_state):
     """
     session = tester.patient_exclusive_cql_connection(node)
     assert_one(session, "SELECT bootstrapped FROM system.local WHERE key='local'", [expected_bootstrap_state])
+    session.shutdown()
+
+
+def assert_lists_equal_ignoring_order(list1, list2, sort_key=None):
+    """
+    asserts that the contents of the two provided lists are equal
+    but ignoring the order that the items of the lists are actually in
+    :param list1: list to check whether its contents are equal to list2
+    :param list2: list to check whether its contents are equal to list1
+    :param sort_key: if the contents of the list are of type dict, the
+    key to use of each object to sort the overall object with
+    """
+    normalized_list1 = []
+    for obj in list1:
+        normalized_list1.append(obj)
+
+    normalized_list2 = []
+    for obj in list2:
+        normalized_list2.append(obj)
+
+    if not sort_key:
+        sorted_list1 = sorted(normalized_list1, key=lambda elm: elm[0])
+        sorted_list2 = sorted(normalized_list2, key=lambda elm: elm[0])
+    else:
+        # always sort by "id" first; the resulting two-factor sort increases
+        # the chance of ordering both lists exactly the same way
+        if sort_key != 'id' and 'id' in list1[0].keys():
+            sorted_list1 = sorted(sorted(normalized_list1, key=lambda elm: elm["id"]), key=lambda elm: elm[sort_key])
+            sorted_list2 = sorted(sorted(normalized_list2, key=lambda elm: elm["id"]), key=lambda elm: elm[sort_key])
+        else:
+            if isinstance(list1[0]['id'], (int, float)):
+                sorted_list1 = sorted(normalized_list1, key=lambda elm: elm[sort_key])
+                sorted_list2 = sorted(normalized_list2, key=lambda elm: elm[sort_key])
+            else:
+                sorted_list1 = sorted(normalized_list1, key=lambda elm: str(elm[sort_key]))
+                sorted_list2 = sorted(normalized_list2, key=lambda elm: str(elm[sort_key]))
+
+    assert sorted_list1 == sorted_list2
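
_assert_exception above matches the expected pattern against the exception's
repr with re.search(); re.match() would anchor at the start of the repr, where
the class name sits, and never find a message substring. A short sketch with a
stand-in exception:

    import re

    e = Exception("User cathy has no ALTER permission on <table ks.cf>")
    # repr(e) looks like: Exception('User cathy has no ALTER permission ...')
    assert re.search("no ALTER permission", repr(e)) is not None
    assert re.match("no ALTER permission", repr(e)) is None   # anchored at "Exception(...", no match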

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/tools/context.py
----------------------------------------------------------------------
diff --git a/tools/context.py b/tools/context.py
index a17a64c..0b39534 100644
--- a/tools/context.py
+++ b/tools/context.py
@@ -5,8 +5,6 @@ making those context managers function.
 import logging
 from contextlib import contextmanager
 
-from six import print_
-
 from tools.env import ALLOW_NOISY_LOGGING
 
 
@@ -22,7 +20,7 @@ def log_filter(log_id, expected_strings=None):
     logger.addFilter(log_filter)
     yield
     if log_filter.records_silenced > 0:
-        print_("Logs were filtered to remove messages deemed unimportant, total count: {}".format(log_filter.records_silenced))
+        print("Logs were filtered to remove messages deemed unimportant, total count: %d" % log_filter.records_silenced)
     logger.removeFilter(log_filter)
 
 

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/tools/data.py
----------------------------------------------------------------------
diff --git a/tools/data.py b/tools/data.py
index 2f86166..d5607e0 100644
--- a/tools/data.py
+++ b/tools/data.py
@@ -1,14 +1,16 @@
 import time
+import logging
 
 from cassandra import ConsistencyLevel
 from cassandra.concurrent import execute_concurrent_with_args
 from cassandra.query import SimpleStatement
-from nose.tools import assert_equal, assert_true
 
-import assertions
-from dtest import debug, create_cf, DtestTimeoutError
+from . import assertions
+from dtest import create_cf, DtestTimeoutError
 from tools.funcutils import get_rate_limited_function
 
+logger = logging.getLogger(__name__)
+
 
 def create_c1c2_table(tester, session, read_repair=None):
     create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'}, read_repair=read_repair)
@@ -33,13 +35,13 @@ def query_c1c2(session, key, consistency=ConsistencyLevel.QUORUM, tolerate_missi
     if not tolerate_missing:
         assertions.assert_length_equal(rows, 1)
         res = rows[0]
-        assert_true(len(res) == 2 and res[0] == 'value1' and res[1] == 'value2', res)
+        assert len(res) == 2 and res[0] == 'value1' and res[1] == 'value2', res
     if must_be_missing:
         assertions.assert_length_equal(rows, 0)
 
 
 def insert_columns(tester, session, key, columns_count, consistency=ConsistencyLevel.QUORUM, offset=0):
-    upds = ["UPDATE cf SET v=\'value%d\' WHERE key=\'k%s\' AND c=\'c%06d\'" % (i, key, i) for i in xrange(offset * columns_count, columns_count * (offset + 1))]
+    upds = ["UPDATE cf SET v=\'value%d\' WHERE key=\'k%s\' AND c=\'c%06d\'" % (i, key, i) for i in range(offset * columns_count, columns_count * (offset + 1))]
     query = 'BEGIN BATCH %s; APPLY BATCH' % '; '.join(upds)
     simple_query = SimpleStatement(query, consistency_level=consistency)
     session.execute(simple_query)
@@ -49,8 +51,8 @@ def query_columns(tester, session, key, columns_count, consistency=ConsistencyLe
     query = SimpleStatement('SELECT c, v FROM cf WHERE key=\'k%s\' AND c >= \'c%06d\' AND c <= \'c%06d\'' % (key, offset, columns_count + offset - 1), consistency_level=consistency)
     res = list(session.execute(query))
     assertions.assert_length_equal(res, columns_count)
-    for i in xrange(0, columns_count):
-        assert_equal(res[i][1], 'value{}'.format(i + offset))
+    for i in range(0, columns_count):
+        assert res[i][1] == 'value{}'.format(i + offset)
 
 
 # Simple puts and get (on one row), testing both reads by names and by slice,
@@ -74,20 +76,20 @@ def putget(cluster, session, cl=ConsistencyLevel.QUORUM):
 
 
 def _put_with_overwrite(cluster, session, nb_keys, cl=ConsistencyLevel.QUORUM):
-    for k in xrange(0, nb_keys):
-        kvs = ["UPDATE cf SET v=\'value%d\' WHERE key=\'k%s\' AND c=\'c%02d\'" % (i, k, i) for i in xrange(0, 100)]
+    for k in range(0, nb_keys):
+        kvs = ["UPDATE cf SET v=\'value%d\' WHERE key=\'k%s\' AND c=\'c%02d\'" % (i, k, i) for i in range(0, 100)]
         query = SimpleStatement('BEGIN BATCH %s APPLY BATCH' % '; '.join(kvs), consistency_level=cl)
         session.execute(query)
         time.sleep(.01)
     cluster.flush()
-    for k in xrange(0, nb_keys):
-        kvs = ["UPDATE cf SET v=\'value%d\' WHERE key=\'k%s\' AND c=\'c%02d\'" % (i * 4, k, i * 2) for i in xrange(0, 50)]
+    for k in range(0, nb_keys):
+        kvs = ["UPDATE cf SET v=\'value%d\' WHERE key=\'k%s\' AND c=\'c%02d\'" % (i * 4, k, i * 2) for i in range(0, 50)]
         query = SimpleStatement('BEGIN BATCH %s APPLY BATCH' % '; '.join(kvs), consistency_level=cl)
         session.execute(query)
         time.sleep(.01)
     cluster.flush()
-    for k in xrange(0, nb_keys):
-        kvs = ["UPDATE cf SET v=\'value%d\' WHERE key=\'k%s\' AND c=\'c%02d\'" % (i * 20, k, i * 5) for i in xrange(0, 20)]
+    for k in range(0, nb_keys):
+        kvs = ["UPDATE cf SET v=\'value%d\' WHERE key=\'k%s\' AND c=\'c%02d\'" % (i * 20, k, i * 5) for i in range(0, 20)]
         query = SimpleStatement('BEGIN BATCH %s APPLY BATCH' % '; '.join(kvs), consistency_level=cl)
         session.execute(query)
         time.sleep(.01)
@@ -96,13 +98,13 @@ def _put_with_overwrite(cluster, session, nb_keys, cl=ConsistencyLevel.QUORUM):
 
 def _validate_row(cluster, res):
     assertions.assert_length_equal(res, 100)
-    for i in xrange(0, 100):
+    for i in range(0, 100):
         if i % 5 == 0:
-            assert_equal(res[i][2], 'value{}'.format(i * 4), 'for {}, expecting value{}, got {}'.format(i, i * 4, res[i][2]))
+            assert res[i][2] == 'value{}'.format(i * 4), 'for {}, expecting value{}, got {}'.format(i, i * 4, res[i][2])
         elif i % 2 == 0:
-            assert_equal(res[i][2], 'value{}'.format(i * 2), 'for {}, expecting value{}, got {}'.format(i, i * 2, res[i][2]))
+            assert res[i][2] == 'value{}'.format(i * 2), 'for {}, expecting value{}, got {}'.format(i, i * 2, res[i][2])
         else:
-            assert_equal(res[i][2], 'value{}'.format(i), 'for {}, expecting value{}, got {}'.format(i, i, res[i][2]))
+            assert res[i][2] == 'value{}'.format(i), 'for {}, expecting value{}, got {}'.format(i, i, res[i][2])
 
 
 # Simple puts and range gets, with overwrites and flushes between inserts to
@@ -116,7 +118,7 @@ def range_putget(cluster, session, cl=ConsistencyLevel.QUORUM):
     rows = [result for result in paged_results]
 
     assertions.assert_length_equal(rows, keys * 100)
-    for k in xrange(0, keys):
+    for k in range(0, keys):
         res = rows[:100]
         del rows[:100]
         _validate_row(cluster, res)
@@ -158,9 +160,9 @@ def block_until_index_is_built(node, session, keyspace, table_name, idx_name):
     DtestTimeoutError if it is not.
     """
     start = time.time()
-    rate_limited_debug = get_rate_limited_function(debug, 5)
+    rate_limited_debug_logger = get_rate_limited_function(logger.debug, 5)
     while time.time() < start + 30:
-        rate_limited_debug("waiting for index to build")
+        rate_limited_debug_logger("waiting for index to build")
         time.sleep(1)
         if index_is_built(node, session, keyspace, table_name, idx_name):
             break
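
The rate-limited wrapper used in block_until_index_is_built keeps a tight
polling loop from flooding the log. Roughly, assuming get_rate_limited_function
keeps its (func, seconds) signature:

    import logging
    import time

    from tools.funcutils import get_rate_limited_function

    logger = logging.getLogger(__name__)
    # emit at most one debug line every 5 seconds, however often the loop spins
    rate_limited_debug = get_rate_limited_function(logger.debug, 5)

    deadline = time.time() + 30
    while time.time() < deadline:
        rate_limited_debug("waiting for index to build")
        time.sleep(1)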

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/tools/datahelp.py
----------------------------------------------------------------------
diff --git a/tools/datahelp.py b/tools/datahelp.py
index 891fac7..86afc03 100644
--- a/tools/datahelp.py
+++ b/tools/datahelp.py
@@ -35,13 +35,13 @@ def parse_headers_into_list(data):
     # throw out leading/trailing space and pipes
     # so we can split on the data without getting
     # extra empty fields
-    rows = map(strip, data.split('\n'))
+    rows = list(map(strip, data.split('\n')))
 
     # remove any remaining empty lines (i.e. '') from data
-    rows = filter(None, rows)
+    rows = [_f for _f in rows if _f]
 
     # separate headers from actual data and remove extra spaces from them
-    headers = [unicode(h.strip()) for h in rows.pop(0).split('|')]
+    headers = [str(h.strip()) for h in rows.pop(0).split('|')]
     return headers
 
 
@@ -77,10 +77,10 @@ def parse_row_into_dict(row, headers, format_funcs=None):
             )
         return multirows
 
-    row_map = dict(zip(headers, row_cells))
+    row_map = dict(zip(headers, row_cells))
 
     if format_funcs:
-        for colname, value in row_map.items():
+        for colname, value in list(row_map.items()):
             func = format_funcs.get(colname)
 
             if func is not None:
@@ -110,10 +110,10 @@ def parse_data_into_dicts(data, format_funcs=None):
     # throw out leading/trailing space and pipes
     # so we can split on the data without getting
     # extra empty fields
-    rows = map(strip, data.split('\n'))
+    rows = list(map(strip, data.split('\n')))
 
     # remove any remaining empty/decoration lines (i.e. '') from data
-    rows = filter(row_describes_data, rows)
+    rows = list(filter(row_describes_data, rows))
 
     # remove headers
     headers = parse_headers_into_list(rows.pop(0))
@@ -149,13 +149,13 @@ def create_rows(data, session, table_name, cl=None, format_funcs=None, prefix=''
     # use the first dictionary to build a prepared statement for all
     prepared = session.prepare(
         "{prefix} INSERT INTO {table} ({cols}) values ({vals}) {postfix}".format(
-            prefix=prefix, table=table_name, cols=', '.join(dicts[0].keys()),
-            vals=', '.join('?' for k in dicts[0].keys()), postfix=postfix)
+            prefix=prefix, table=table_name, cols=', '.join(list(dicts[0].keys())),
+            vals=', '.join('?' for k in list(dicts[0].keys())), postfix=postfix)
     )
     if cl is not None:
         prepared.consistency_level = cl
 
-    query_results = execute_concurrent_with_args(session, prepared, [d.values() for d in dicts])
+    query_results = execute_concurrent_with_args(session, prepared, [list(d.values()) for d in dicts])
 
     for i, (status, result_or_exc) in enumerate(query_results):
         # should maybe check status here before appening to expected values
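
The list() wrapping of keys() and values() above is another Python 3 artifact:
dictionary views are iterable but not indexable. A minimal illustration:

    row = {'id': 1, 'val': 'x'}

    vals = row.values()             # a live view in Python 3, not a list
    assert list(vals) == [1, 'x']   # iteration still works (insertion order, 3.7+)
    # vals[0] would raise TypeError: 'dict_values' object is not subscriptable
    first = list(row.values())[0]   # snapshot first, then index
    assert first == 1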

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/tools/decorators.py
----------------------------------------------------------------------
diff --git a/tools/decorators.py b/tools/decorators.py
deleted file mode 100644
index a11a4fc..0000000
--- a/tools/decorators.py
+++ /dev/null
@@ -1,106 +0,0 @@
-import functools
-import unittest
-from distutils.version import LooseVersion
-
-from nose.plugins.attrib import attr
-from nose.tools import assert_in, assert_is_instance
-
-from dtest import DISABLE_VNODES
-
-
-class since(object):
-
-    def __init__(self, cass_version, max_version=None):
-        self.cass_version = LooseVersion(cass_version)
-        self.max_version = max_version
-        if self.max_version is not None:
-            self.max_version = LooseVersion(self.max_version)
-
-    def _skip_msg(self, version):
-        if version < self.cass_version:
-            return "%s < %s" % (version, self.cass_version)
-        if self.max_version and version > self.max_version:
-            return "%s > %s" % (version, self.max_version)
-
-    def _wrap_setUp(self, cls):
-        orig_setUp = cls.setUp
-
-        @functools.wraps(cls.setUp)
-        def wrapped_setUp(obj, *args, **kwargs):
-            obj.max_version = self.max_version
-            orig_setUp(obj, *args, **kwargs)
-            version = obj.cluster.version()
-            msg = self._skip_msg(version)
-            if msg:
-                obj.skip(msg)
-
-        cls.setUp = wrapped_setUp
-        return cls
-
-    def _wrap_function(self, f):
-        @functools.wraps(f)
-        def wrapped(obj):
-            obj.max_version = self.max_version
-            version = obj.cluster.version()
-            msg = self._skip_msg(version)
-            if msg:
-                obj.skip(msg)
-            f(obj)
-        return wrapped
-
-    def __call__(self, skippable):
-        if isinstance(skippable, type):
-            return self._wrap_setUp(skippable)
-        return self._wrap_function(skippable)
-
-
-def no_vnodes():
-    """
-    Skips the decorated test or test class if using vnodes.
-    """
-    return unittest.skipIf(not DISABLE_VNODES, 'Test disabled for vnodes')
-
-
-def known_failure(failure_source, jira_url, flaky=False, notes=''):
-    """
-    Tag a test as a known failure. Associate it with the URL for a JIRA
-    ticket and tag it as flaky or not.
-
-    Valid values for failure_source include: 'cassandra', 'test', 'driver', and
-    'systemic'.
-
-    To run all known failures, use the functionality provided by the nosetests
-    attrib plugin, using the known_failure attributes:
-
-        # only run tests that are known to fail
-        $ nosetests -a known_failure
-        # only run tests that are not known to fail
-        $ nosetests -a !known_failure
-        # only run tests that fail because of cassandra bugs
-        $ nosetests -A "'cassandra' in [d['failure_source'] for d in known_failure]"
-
-    Known limitations: a given test may only be tagged once and still work as
-    expected with the attrib plugin machinery; if you decorate a test with
-    known_failure multiple times, the known_failure attribute of that test
-    will have the value applied by the outermost instance of the decorator.
-    """
-    valid_failure_sources = ('cassandra', 'test', 'systemic', 'driver')
-
-    def wrapper(f):
-        assert_in(failure_source, valid_failure_sources)
-        assert_is_instance(flaky, bool)
-
-        try:
-            existing_failure_annotations = f.known_failure
-        except AttributeError:
-            existing_failure_annotations = []
-
-        new_annotation = [{'failure_source': failure_source, 'jira_url': jira_url, 'notes': notes, 'flaky': flaky}]
-
-        failure_annotations = existing_failure_annotations + new_annotation
-
-        tagged_func = attr(known_failure=failure_annotations)(f)
-
-        return tagged_func
-
-    return wrapper
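
With tools/decorators.py removed, version gating becomes a plain pytest
marker, as the migrated modules do with "since = pytest.mark.since". A sketch
of the new pattern; the marker is assumed to be interpreted by a conftest.py
hook that is not part of this hunk:

    import pytest

    since = pytest.mark.since   # replaces tools.decorators.since

    @since('2.2', max_version='3.X')
    def test_sstableupgrade():
        # a conftest.py hook is expected to read the marker and skip the
        # test when the cluster version falls outside [2.2, 3.X]
        pass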

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/tools/files.py
----------------------------------------------------------------------
diff --git a/tools/files.py b/tools/files.py
index c81cea0..7f0cd97 100644
--- a/tools/files.py
+++ b/tools/files.py
@@ -3,8 +3,9 @@ import os
 import re
 import sys
 import tempfile
+import logging
 
-from dtest import debug  # Depending on dtest is not good long-term.
+logger = logging.getLogger(__name__)
 
 
 def replace_in_file(filepath, search_replacements):
@@ -37,5 +38,5 @@ def size_of_files_in_dir(dir_name, verbose=True):
     """
     files = [os.path.join(dir_name, f) for f in os.listdir(dir_name)]
     if verbose:
-        debug('getting sizes of these files: {}'.format(files))
+        logger.debug('getting sizes of these files: {}'.format(files))
     return sum(os.path.getsize(f) for f in files)

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/tools/git.py
----------------------------------------------------------------------
diff --git a/tools/git.py b/tools/git.py
index 2f5afa6..7daf4b8 100644
--- a/tools/git.py
+++ b/tools/git.py
@@ -1,22 +1,22 @@
 import subprocess
+import logging
 
-from dtest import CASSANDRA_DIR, debug
+logger = logging.getLogger(__name__)
 
 
-def cassandra_git_branch(cdir=None):
+def cassandra_git_branch(cassandra_dir):
     '''Get the name of the git branch at CASSANDRA_DIR.
     '''
-    cdir = CASSANDRA_DIR if cdir is None else cdir
     try:
-        p = subprocess.Popen(['git', 'branch'], cwd=cdir,
+        p = subprocess.Popen(['git', 'branch'], cwd=cassandra_dir,
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     except OSError as e:  # e.g. if git isn't available, just give up and return None
-        debug('shelling out to git failed: {}'.format(e))
+        logger.debug('shelling out to git failed: {}'.format(e))
         return
 
     out, err = p.communicate()
     # fail if git failed
     if p.returncode != 0:
-        raise RuntimeError('Git printed error: {err}'.format(err=err))
-    [current_branch_line] = [line for line in out.splitlines() if line.startswith('*')]
+        raise RuntimeError('Git printed error: {err}'.format(err=err.decode("utf-8")))
+    [current_branch_line] = [line for line in out.decode("utf-8").splitlines() if line.startswith('*')]
     return current_branch_line[1:].strip()
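
The .decode("utf-8") calls added here follow from subprocess pipes returning
bytes under Python 3; line.startswith('*') against raw bytes would raise a
TypeError. A minimal sketch (assumes git is on PATH and the working directory
is a checkout):

    import subprocess

    p = subprocess.Popen(['git', 'branch'],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()                # both are bytes in Python 3
    lines = out.decode("utf-8").splitlines()  # decode before string ops
    current = [line for line in lines if line.startswith('*')]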

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/tools/hacks.py
----------------------------------------------------------------------
diff --git a/tools/hacks.py b/tools/hacks.py
index 9faa778..f68e20d 100644
--- a/tools/hacks.py
+++ b/tools/hacks.py
@@ -4,13 +4,14 @@ weirdnesses in Cassandra.
 """
 import os
 import time
+import logging
 
 from cassandra.concurrent import execute_concurrent
-from nose.tools import assert_less_equal
 
-import dtest
 from tools.funcutils import get_rate_limited_function
 
+logger = logging.getLogger(__name__)
+
 
 def _files_in(directory):
     return {
@@ -20,7 +21,7 @@ def _files_in(directory):
 
 def advance_to_next_cl_segment(session, commitlog_dir,
                                keyspace_name='ks', table_name='junk_table',
-                               timeout=60, debug=True):
+                               timeout=60):
     """
     This is a hack to work around problems like CASSANDRA-11811.
 
@@ -29,15 +30,6 @@ def advance_to_next_cl_segment(session, commitlog_dir,
     replaying some mutations that initialize system tables, so this function
     advances the node to the next CL by filling up the first one.
     """
-    if debug:
-        _debug = dtest.debug
-    else:
-        def _debug(*args, **kwargs):
-            """
-            noop debug method
-            """
-            pass
-
     session.execute(
         'CREATE TABLE {ks}.{tab} ('
         'a uuid PRIMARY KEY, b uuid, c uuid, d uuid, '
@@ -58,17 +50,15 @@ def advance_to_next_cl_segment(session, commitlog_dir,
 
     start = time.time()
     stop_time = start + timeout
-    rate_limited_debug = get_rate_limited_function(_debug, 5)
-    _debug('attempting to write until we start writing to new CL segments: {}'.format(initial_cl_files))
+    rate_limited_debug_logger = get_rate_limited_function(logger.debug, 5)
+    logger.debug('attempting to write until we start writing to new CL segments: {}'.format(initial_cl_files))
 
     while _files_in(commitlog_dir) <= initial_cl_files:
         elapsed = time.time() - start
-        rate_limited_debug('  commitlog-advancing load step has lasted {s:.2f}s'.format(s=elapsed))
-        assert_less_equal(
-            time.time(), stop_time,
-            "It's been over a {s}s and we haven't written a new "
+        rate_limited_debug_logger('  commitlog-advancing load step has lasted {s:.2f}s'.format(s=elapsed))
+        assert time.time() <= stop_time, \
+            ("It's been over {s}s and we haven't written a new "
+             "commitlog segment. Something is wrong.").format(s=timeout)
-        )
         execute_concurrent(
             session,
             ((prepared_insert, ()) for _ in range(1000)),
@@ -76,4 +66,4 @@ def advance_to_next_cl_segment(session, commitlog_dir,
             raise_on_first_error=True,
         )
 
-    _debug('present commitlog segments: {}'.format(_files_in(commitlog_dir)))
+    logger.debug('present commitlog segments: {}'.format(_files_in(commitlog_dir)))
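
One subtlety in the assertion message above: .format() binds only to the
string literal it is called on, so splitting a message across a '+'
concatenation leaves placeholders in the first half unexpanded. A quick
illustration:

    timeout = 60

    broken = "It's been over {s}s " + "and nothing happened.".format(s=timeout)
    assert "{s}" in broken   # the placeholder survives, unformatted

    fixed = ("It's been over {s}s "
             "and nothing happened.").format(s=timeout)
    assert fixed.startswith("It's been over 60s")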

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/tools/intervention.py
----------------------------------------------------------------------
diff --git a/tools/intervention.py b/tools/intervention.py
index 27b1977..6a8add8 100644
--- a/tools/intervention.py
+++ b/tools/intervention.py
@@ -1,8 +1,10 @@
 import random
 import time
+import logging
+
 from threading import Thread
 
-from dtest import debug
+logger = logging.getLogger(__name__)
 
 
 class InterruptBootstrap(Thread):
@@ -38,9 +40,9 @@ class InterruptCompaction(Thread):
         self.node.watch_log_for("Compacting(.*)%s" % (self.tablename,), from_mark=self.mark, filename=self.filename)
         if self.delay > 0:
             random_delay = random.uniform(0, self.delay)
-            debug("Sleeping for {} seconds".format(random_delay))
+            logger.debug("Sleeping for {} seconds".format(random_delay))
             time.sleep(random_delay)
-        debug("Killing node {}".format(self.node.address()))
+        logger.debug("Killing node {}".format(self.node.address()))
         self.node.stop(gently=False)
 
 

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/tools/jmxutils.py
----------------------------------------------------------------------
diff --git a/tools/jmxutils.py b/tools/jmxutils.py
index 7468226..83459e0 100644
--- a/tools/jmxutils.py
+++ b/tools/jmxutils.py
@@ -1,13 +1,16 @@
 import json
 import os
 import subprocess
-from urllib2 import urlopen
+import urllib.request
+import urllib.parse
+import logging
 
 import ccmlib.common as common
 
-from dtest import warning
 from distutils.version import LooseVersion
 
+logger = logging.getLogger(__name__)
+
 JOLOKIA_JAR = os.path.join('lib', 'jolokia-jvm-1.2.3-agent.jar')
 CLASSPATH_SEP = ';' if common.is_win() else ':'
 JVM_OPTIONS = "jvm.options"
@@ -18,7 +21,7 @@ def jolokia_classpath():
         tools_jar = os.path.join(os.environ['JAVA_HOME'], 'lib', 'tools.jar')
         return CLASSPATH_SEP.join((tools_jar, JOLOKIA_JAR))
     else:
-        warning("Environment variable $JAVA_HOME not present: jmx-based " +
+        logger.warning("Environment variable $JAVA_HOME not present: jmx-based " +
                 "tests may fail because of missing $JAVA_HOME/lib/tools.jar.")
         return JOLOKIA_JAR
 
@@ -50,7 +53,7 @@ def make_mbean(package, type, **kwargs):
     rv = 'org.apache.cassandra.%s:type=%s' % (package, type)
     if kwargs:
         rv += ',' + ','.join('{k}={v}'.format(k=k, v=v)
-                             for k, v in kwargs.iteritems())
+                             for k, v in kwargs.items())
     return rv
 
 
@@ -204,9 +207,9 @@ class JolokiaAgent(object):
         try:
             subprocess.check_output(args, stderr=subprocess.STDOUT)
         except subprocess.CalledProcessError as exc:
-            print "Failed to start jolokia agent (command was: %s): %s" % (' '.join(args), exc)
-            print "Exit status was: %d" % (exc.returncode,)
-            print "Output was: %s" % (exc.output,)
+            print("Failed to start jolokia agent (command was: %s): %s" % (' '.join(args), exc))
+            print("Exit status was: %d" % (exc.returncode,))
+            print("Output was: %s" % (exc.output,))
             raise
 
     def stop(self):
@@ -220,15 +223,16 @@ class JolokiaAgent(object):
         try:
             subprocess.check_output(args, stderr=subprocess.STDOUT)
         except subprocess.CalledProcessError as exc:
-            print "Failed to stop jolokia agent (command was: %s): %s" % (' '.join(args), exc)
-            print "Exit status was: %d" % (exc.returncode,)
-            print "Output was: %s" % (exc.output,)
+            print("Failed to stop jolokia agent (command was: %s): %s" % (' '.join(args), exc))
+            print("Exit status was: %d" % (exc.returncode,))
+            print("Output was: %s" % (exc.output,))
             raise
 
     def _query(self, body, verbose=True):
-        request_data = json.dumps(body)
+        request_data = json.dumps(body).encode("utf-8")
         url = 'http://%s:8778/jolokia/' % (self.node.network_interfaces['binary'][0],)
-        response = urlopen(url, data=request_data, timeout=10.0)
+        req = urllib.request.Request(url)
+        response = urllib.request.urlopen(req, data=request_data, timeout=10.0)
         if response.code != 200:
             raise Exception("Failed to query Jolokia agent; HTTP response code: %d; response: %s" % (response.code, response.readlines()))
 
@@ -237,9 +241,9 @@ class JolokiaAgent(object):
         if response['status'] != 200:
             stacktrace = response.get('stacktrace')
             if stacktrace and verbose:
-                print "Stacktrace from Jolokia error follows:"
+                print("Stacktrace from Jolokia error follows:")
                 for line in stacktrace.splitlines():
-                    print line
+                    print(line)
             raise Exception("Jolokia agent returned non-200 status: %s" % (response,))
         return response
 

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/tools/metadata_wrapper.py
----------------------------------------------------------------------
diff --git a/tools/metadata_wrapper.py b/tools/metadata_wrapper.py
index 267acc5..43bdbfb 100644
--- a/tools/metadata_wrapper.py
+++ b/tools/metadata_wrapper.py
@@ -1,9 +1,7 @@
 from abc import ABCMeta, abstractproperty
 
 
-class UpdatingMetadataWrapperBase(object):
-    __metaclass__ = ABCMeta
-
+class UpdatingMetadataWrapperBase(metaclass=ABCMeta):
     @abstractproperty
     def _wrapped(self):
         pass
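
The metaclass change above matters because Python 3 silently ignores a
class-body __metaclass__ attribute; the metaclass must be named in the class
statement instead. Roughly (using @property/@abstractmethod, the modern
spelling of abstractproperty):

    from abc import ABCMeta, abstractmethod

    class WrapperBase(metaclass=ABCMeta):
        @property
        @abstractmethod
        def _wrapped(self):
            pass

    # WrapperBase() now raises TypeError: Can't instantiate abstract class,
    # which a Python 2 style __metaclass__ attribute would no longer enforce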

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/tools/misc.py
----------------------------------------------------------------------
diff --git a/tools/misc.py b/tools/misc.py
index 0ca7adf..aa2c084 100644
--- a/tools/misc.py
+++ b/tools/misc.py
@@ -1,11 +1,15 @@
 import os
 import subprocess
 import time
+import hashlib
+import logging
+
 from collections import Mapping
 
 from ccmlib.node import Node
 
-from dtest import debug
+
+logger = logging.getLogger(__name__)
 
 
 # work for cluster started by populate
@@ -53,23 +57,65 @@ def generate_ssl_stores(base_dir, passphrase='cassandra'):
     """
 
     if os.path.exists(os.path.join(base_dir, 'keystore.jks')):
-        debug("keystores already exists - skipping generation of ssl keystores")
+        logger.debug("keystores already exists - skipping generation of ssl keystores")
         return
 
-    debug("generating keystore.jks in [{0}]".format(base_dir))
+    logger.debug("generating keystore.jks in [{0}]".format(base_dir))
     subprocess.check_call(['keytool', '-genkeypair', '-alias', 'ccm_node', '-keyalg', 'RSA', '-validity', '365',
                            '-keystore', os.path.join(base_dir, 'keystore.jks'), '-storepass', passphrase,
                            '-dname', 'cn=Cassandra Node,ou=CCMnode,o=DataStax,c=US', '-keypass', passphrase])
-    debug("exporting cert from keystore.jks in [{0}]".format(base_dir))
+    logger.debug("exporting cert from keystore.jks in [{0}]".format(base_dir))
     subprocess.check_call(['keytool', '-export', '-rfc', '-alias', 'ccm_node',
                            '-keystore', os.path.join(base_dir, 'keystore.jks'),
                            '-file', os.path.join(base_dir, 'ccm_node.cer'), '-storepass', passphrase])
-    debug("importing cert into truststore.jks in [{0}]".format(base_dir))
+    logger.debug("importing cert into truststore.jks in [{0}]".format(base_dir))
     subprocess.check_call(['keytool', '-import', '-file', os.path.join(base_dir, 'ccm_node.cer'),
                            '-alias', 'ccm_node', '-keystore', os.path.join(base_dir, 'truststore.jks'),
                            '-storepass', passphrase, '-noprompt'])
 
 
+def list_to_hashed_dict(lst):
+    """
+    Takes a list, hashes the contents, and puts them into a dict so the contents can be
+    compared without order. Unfortunately, we need to do a little massaging of our input;
+    the result from the driver can return an OrderedMapSerializedKey
+    (e.g. [0, 9, OrderedMapSerializedKey([(10, 11)])]) but our "expected" list is simply a
+    list of elements (or list of lists). This means that if we hash the values as-is we'll
+    get different results. To avoid this, when we see a dict, convert the raw values
+    (key, value) into a list and insert that list into a new list.
+    :param lst: the list to convert into a dict
+    :return: a dict containing the hashed contents of the list
+    """
+    hashed_dict = dict()
+    for item_lst in lst:
+        normalized_list = []
+        for item in item_lst:
+            if hasattr(item, "items"):
+                tmp_list = []
+                for a, b in item.items():
+                    tmp_list.append(a)
+                    tmp_list.append(b)
+                normalized_list.append(tmp_list)
+            else:
+                normalized_list.append(item)
+        list_digest = hashlib.sha256(str(normalized_list).encode('utf-8', 'ignore')).hexdigest()
+        hashed_dict[list_digest] = normalized_list
+    return hashed_dict
+
+
+def get_current_test_name():
+    """
+    See https://docs.pytest.org/en/latest/example/simple.html#pytest-current-test-environment-variable
+    :return: the name of the currently running test, with the pytest stage suffix stripped
+    """
+    pytest_current_test = os.environ.get('PYTEST_CURRENT_TEST')
+    test_splits = pytest_current_test.split("::")
+    current_test_name = test_splits[-1]
+    current_test_name = current_test_name.replace(" (call)", "")
+    current_test_name = current_test_name.replace(" (setup)", "")
+    current_test_name = current_test_name.replace(" (teardown)", "")
+    return current_test_name
+
+
 class ImmutableMapping(Mapping):
     """
     Convenience class for when you want an immutable-ish map.

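A quick sketch of the order-insensitive comparison that list_to_hashed_dict enables; the rows here are made up for illustration, and it assumes the tools/misc.py from the hunk above is importable:

    from tools.misc import list_to_hashed_dict

    actual = [[2, 'b'], [1, 'a']]
    expected = [[1, 'a'], [2, 'b']]

    # Each row is normalized and hashed independently of its position, so two
    # lists holding the same rows in different orders produce equal dicts.
    assert list_to_hashed_dict(actual) == list_to_hashed_dict(expected)
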
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/tools/paging.py
----------------------------------------------------------------------
diff --git a/tools/paging.py b/tools/paging.py
index a9b6756..0d99bfd 100644
--- a/tools/paging.py
+++ b/tools/paging.py
@@ -1,7 +1,7 @@
 import time
 
 from tools.datahelp import flatten_into_set
+from tools.misc import list_to_hashed_dict
 
 
 class Page(object):
     data = None
@@ -165,7 +165,8 @@ class PageAssertionMixin(object):
     """Can be added to subclasses of unittest.Tester"""
 
     def assertEqualIgnoreOrder(self, actual, expected):
-        return self.assertItemsEqual(actual, expected)
+        assert list_to_hashed_dict(actual) == list_to_hashed_dict(expected)
 
     def assertIsSubsetOf(self, subset, superset):
-        self.assertLessEqual(flatten_into_set(subset), flatten_into_set(superset))
+        assert flatten_into_set(subset) <= flatten_into_set(superset)

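For reference on the rewritten assertIsSubsetOf: for Python sets, `<=` is the subset test, so the assertion reads "everything in subset is also in superset". A tiny standalone example (values are illustrative):

    subset = {('a', 1), ('b', 2)}
    superset = {('a', 1), ('b', 2), ('c', 3)}
    assert subset <= superset              # holds: every element is present
    assert not ({('d', 4)} <= superset)    # a non-member fails the subset test
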
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/topology_test.py
----------------------------------------------------------------------
diff --git a/topology_test.py b/topology_test.py
index 02d1806..47426f0 100644
--- a/topology_test.py
+++ b/topology_test.py
@@ -1,21 +1,24 @@
 import re
 import time
+import pytest
+import logging
+
 from threading import Thread
-from unittest import skip
 
 from cassandra import ConsistencyLevel
 from ccmlib.node import TimeoutError, ToolError
-from nose.plugins.attrib import attr
 
-from dtest import Tester, debug, create_ks, create_cf
+from dtest import Tester, create_ks, create_cf
 from tools.assertions import assert_almost_equal, assert_all, assert_none
 from tools.data import insert_c1c2, query_c1c2
-from tools.decorators import no_vnodes, since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 class TestTopology(Tester):
 
-    def do_not_join_ring_test(self):
+    def test_do_not_join_ring(self):
         """
         @jira_ticket CASSANDRA-9034
         Check that AssertionError is not thrown on SizeEstimatesRecorder before node joins ring
@@ -32,19 +35,19 @@ class TestTopology(Tester):
         node1.stop(gently=False)
 
     @since('3.0.11')
-    def size_estimates_multidc_test(self):
+    def test_size_estimates_multidc(self):
         """
         Test that primary ranges are correctly generated on
         system.size_estimates for multi-dc, multi-ks scenario
         @jira_ticket CASSANDRA-9639
         """
-        debug("Creating cluster")
+        logger.debug("Creating cluster")
         cluster = self.cluster
         cluster.set_configuration_options(values={'num_tokens': 2})
         cluster.populate([2, 1])
         node1_1, node1_2, node2_1 = cluster.nodelist()
 
-        debug("Setting tokens")
+        logger.debug("Setting tokens")
         node1_tokens, node2_tokens, node3_tokens = ['-6639341390736545756,-2688160409776496397',
                                                     '-2506475074448728501,8473270337963525440',
                                                     '-3736333188524231709,8673615181726552074']
@@ -53,20 +56,20 @@ class TestTopology(Tester):
         node2_1.set_configuration_options(values={'initial_token': node3_tokens})
         cluster.set_configuration_options(values={'num_tokens': 2})
 
-        debug("Starting cluster")
+        logger.debug("Starting cluster")
         cluster.start()
 
         out, _, _ = node1_1.nodetool('ring')
-        debug("Nodetool ring output {}".format(out))
+        logger.debug("Nodetool ring output {}".format(out))
 
-        debug("Creating keyspaces")
+        logger.debug("Creating keyspaces")
         session = self.patient_cql_connection(node1_1)
         create_ks(session, 'ks1', 3)
         create_ks(session, 'ks2', {'dc1': 2})
         create_cf(session, 'ks1.cf1', columns={'c1': 'text', 'c2': 'text'})
         create_cf(session, 'ks2.cf2', columns={'c1': 'text', 'c2': 'text'})
 
-        debug("Refreshing size estimates")
+        logger.debug("Refreshing size estimates")
         node1_1.nodetool('refreshsizeestimates')
         node1_2.nodetool('refreshsizeestimates')
         node2_1.nodetool('refreshsizeestimates')
@@ -94,7 +97,7 @@ class TestTopology(Tester):
         127.0.0.3   8673615181726552074
         """
 
-        debug("Checking node1_1 size_estimates primary ranges")
+        logger.debug("Checking node1_1 size_estimates primary ranges")
         session = self.patient_exclusive_cql_connection(node1_1)
         assert_all(session, "SELECT range_start, range_end FROM system.size_estimates "
                             "WHERE keyspace_name = 'ks1'", [['-3736333188524231709', '-2688160409776496397'],
@@ -107,7 +110,7 @@ class TestTopology(Tester):
                                                             ['8473270337963525440', '8673615181726552074'],
                                                             ['8673615181726552074', '-9223372036854775808']])
 
-        debug("Checking node1_2 size_estimates primary ranges")
+        logger.debug("Checking node1_2 size_estimates primary ranges")
         session = self.patient_exclusive_cql_connection(node1_2)
         assert_all(session, "SELECT range_start, range_end FROM system.size_estimates "
                             "WHERE keyspace_name = 'ks1'", [['-2506475074448728501', '8473270337963525440'],
@@ -116,7 +119,7 @@ class TestTopology(Tester):
                             "WHERE keyspace_name = 'ks2'", [['-2506475074448728501', '8473270337963525440'],
                                                             ['-2688160409776496397', '-2506475074448728501']])
 
-        debug("Checking node2_1 size_estimates primary ranges")
+        logger.debug("Checking node2_1 size_estimates primary ranges")
         session = self.patient_exclusive_cql_connection(node2_1)
         assert_all(session, "SELECT range_start, range_end FROM system.size_estimates "
                             "WHERE keyspace_name = 'ks1'", [['-6639341390736545756', '-3736333188524231709'],
@@ -124,7 +127,7 @@ class TestTopology(Tester):
         assert_none(session, "SELECT range_start, range_end FROM system.size_estimates "
                              "WHERE keyspace_name = 'ks2'")
 
-    def simple_decommission_test(self):
+    def test_simple_decommission(self):
         """
         @jira_ticket CASSANDRA-9912
         Check that AssertionError is not thrown on SizeEstimatesRecorder after node is decommissioned
@@ -151,8 +154,8 @@ class TestTopology(Tester):
         # described in 9912. Do not remove it.
         time.sleep(10)
 
-    @skip('Hangs on CI for 2.1')
-    def concurrent_decommission_not_allowed_test(self):
+    @pytest.mark.skip(reason='Hangs on CI for 2.1')
+    def test_concurrent_decommission_not_allowed(self):
         """
         Test concurrent decommission is not allowed
         """
@@ -179,7 +182,7 @@ class TestTopology(Tester):
         node2.watch_log_for('DECOMMISSIONING', filename='debug.log')
 
         # Launch a second decommission, should fail
-        with self.assertRaises(ToolError):
+        with pytest.raises(ToolError):
             node2.nodetool('decommission')
 
         # Check data is correctly forwarded to node1 after node2 is decommissioned
@@ -187,17 +190,20 @@ class TestTopology(Tester):
         node2.watch_log_for('DECOMMISSIONED', from_mark=mark)
         session = self.patient_cql_connection(node1)
         session.execute('USE ks')
-        for n in xrange(0, 10000):
+        for n in range(0, 10000):
             query_c1c2(session, n, ConsistencyLevel.ONE)
 
     @since('3.10')
-    def resumable_decommission_test(self):
+    def test_resumable_decommission(self):
         """
         @jira_ticket CASSANDRA-12008
 
         Test decommission operation is resumable
         """
-        self.ignore_log_patterns = [r'Streaming error occurred', r'Error while decommissioning node', r'Remote peer 127.0.0.2 failed stream session', r'Remote peer 127.0.0.2:7000 failed stream session']
+        self.fixture_dtest_setup.ignore_log_patterns = [r'Streaming error occurred',
+                                                        r'Error while decommissioning node',
+                                                        r'Remote peer 127.0.0.2 failed stream session',
+                                                        r'Remote peer 127.0.0.2:7000 failed stream session']
         cluster = self.cluster
         cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
         cluster.populate(3, install_byteman=True).start(wait_other_notice=True)
@@ -211,7 +217,7 @@ class TestTopology(Tester):
         insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ALL)
 
         # Execute first rebuild, should fail
-        with self.assertRaises(ToolError):
+        with pytest.raises(ToolError):
             if cluster.version() >= '4.0':
                 script = ['./byteman/4.0/decommission_failure_inject.btm']
             else:
@@ -235,7 +241,7 @@ class TestTopology(Tester):
         node3.stop(gently=False)
         session = self.patient_exclusive_cql_connection(node1)
         session.execute('USE ks')
-        for i in xrange(0, 10000):
+        for i in range(0, 10000):
             query_c1c2(session, i, ConsistencyLevel.ONE)
         node1.stop(gently=False)
         node3.start()
@@ -244,11 +250,11 @@ class TestTopology(Tester):
         node3.watch_log_for('Starting listening for CQL clients', from_mark=mark)
         session = self.patient_exclusive_cql_connection(node3)
         session.execute('USE ks')
-        for i in xrange(0, 10000):
+        for i in range(0, 10000):
             query_c1c2(session, i, ConsistencyLevel.ONE)
 
-    @no_vnodes()
-    def movement_test(self):
+    @pytest.mark.no_vnodes
+    def test_movement(self):
         cluster = self.cluster
 
         # Create an unbalanced ring
@@ -281,7 +287,7 @@ class TestTopology(Tester):
         cluster.cleanup()
 
         # Check we can get all the keys
-        for n in xrange(0, 30000):
+        for n in range(0, 30000):
             query_c1c2(session, n, ConsistencyLevel.ONE)
 
         # Now the load should be basically even
@@ -291,8 +297,8 @@ class TestTopology(Tester):
         assert_almost_equal(sizes[0], sizes[2])
         assert_almost_equal(sizes[1], sizes[2])
 
-    @no_vnodes()
-    def decommission_test(self):
+    @pytest.mark.no_vnodes
+    def test_decommission(self):
         cluster = self.cluster
 
         tokens = cluster.balanced_tokens(4)
@@ -317,17 +323,17 @@ class TestTopology(Tester):
         time.sleep(.5)
 
         # Check we can get all the keys
-        for n in xrange(0, 30000):
+        for n in range(0, 30000):
             query_c1c2(session, n, ConsistencyLevel.QUORUM)
 
         sizes = [node.data_size() for node in cluster.nodelist() if node.is_running()]
-        debug(sizes)
+        logger.debug(sizes)
         assert_almost_equal(sizes[0], sizes[1])
         assert_almost_equal((2.0 / 3.0) * sizes[0], sizes[2])
         assert_almost_equal(sizes[2], init_size)
 
-    @no_vnodes()
-    def move_single_node_test(self):
+    @pytest.mark.no_vnodes
+    def test_move_single_node(self):
         """ Test moving a node in a single-node cluster (#4200) """
         cluster = self.cluster
 
@@ -350,12 +356,12 @@ class TestTopology(Tester):
         cluster.cleanup()
 
         # Check we can get all the keys
-        for n in xrange(0, 10000):
+        for n in range(0, 10000):
             query_c1c2(session, n, ConsistencyLevel.ONE)
 
     @since('3.0')
-    def decommissioned_node_cant_rejoin_test(self):
-        '''
+    def test_decommissioned_node_cant_rejoin(self):
+        """
         @jira_ticket CASSANDRA-8801
 
         Test that a decommissioned node can't rejoin the cluster by:
@@ -365,22 +371,19 @@ class TestTopology(Tester):
         - asserting that the "decommissioned node won't rejoin" error is in the
         logs for that node and
         - asserting that the node is not running.
-        '''
+        """
         rejoin_err = 'This node was decommissioned and will not rejoin the ring'
-        try:
-            self.ignore_log_patterns = list(self.ignore_log_patterns)
-        except AttributeError:
-            self.ignore_log_patterns = []
-        self.ignore_log_patterns.append(rejoin_err)
+        self.fixture_dtest_setup.ignore_log_patterns = list(self.fixture_dtest_setup.ignore_log_patterns) + [
+            rejoin_err]
 
         self.cluster.populate(3).start(wait_for_binary_proto=True)
         node1, node2, node3 = self.cluster.nodelist()
 
-        debug('decommissioning...')
+        logger.debug('decommissioning...')
         node3.decommission(force=self.cluster.version() >= '4.0')
-        debug('stopping...')
+        logger.debug('stopping...')
         node3.stop()
-        debug('attempting restart...')
+        logger.debug('attempting restart...')
         node3.start(wait_other_notice=False)
         try:
             # usually takes 3 seconds, so give it a generous 15
@@ -390,9 +393,8 @@ class TestTopology(Tester):
             # let that pass and move on to string assertion below
             pass
 
-        self.assertIn(rejoin_err,
-                      '\n'.join(['\n'.join(err_list)
-                                 for err_list in node3.grep_log_for_errors()]))
+        assert re.search(rejoin_err,
+                         '\n'.join(['\n'.join(err_list) for err_list in node3.grep_log_for_errors()]), re.MULTILINE)
 
         # Give the node some time to shut down once it has detected
         # its invalid state. If it doesn't shut down in the 30 seconds,
@@ -401,10 +403,10 @@ class TestTopology(Tester):
         while start + 30 > time.time() and node3.is_running():
             time.sleep(1)
 
-        self.assertFalse(node3.is_running())
+        assert not node3.is_running()
 
     @since('3.0')
-    def crash_during_decommission_test(self):
+    def test_crash_during_decommission(self):
         """
         If a node crashes whilst another node is being decommissioned,
         upon restarting the crashed node should not have invalid entries
@@ -412,7 +414,7 @@ class TestTopology(Tester):
         @jira_ticket CASSANDRA-10231
         """
         cluster = self.cluster
-        self.ignore_log_patterns = [r'Streaming error occurred', 'Stream failed']
+        self.fixture_dtest_setup.ignore_log_patterns = [r'Streaming error occurred', 'Stream failed']
         cluster.populate(3).start(wait_other_notice=True)
 
         node1, node2 = cluster.nodelist()[0:2]
@@ -425,24 +427,24 @@ class TestTopology(Tester):
         while t.is_alive():
             out = self.show_status(node2)
             if null_status_pattern.search(out):
-                debug("Matched null status entry")
+                logger.debug("Matched null status entry")
                 break
-            debug("Restarting node2")
+            logger.debug("Restarting node2")
             node2.stop(gently=False)
             node2.start(wait_for_binary_proto=True, wait_other_notice=False)
 
-        debug("Waiting for decommission to complete")
+        logger.debug("Waiting for decommission to complete")
         t.join()
         self.show_status(node2)
 
-        debug("Sleeping for 30 seconds to allow gossip updates")
+        logger.debug("Sleeping for 30 seconds to allow gossip updates")
         time.sleep(30)
         out = self.show_status(node2)
-        self.assertFalse(null_status_pattern.search(out))
+        assert not null_status_pattern.search(out)
 
     @since('3.12')
-    @attr('resource-intensive')
-    def stop_decommission_too_few_replicas_multi_dc_test(self):
+    @pytest.mark.resource_intensive
+    def test_stop_decommission_too_few_replicas_multi_dc(self):
         """
         Decommission should fail when it would result in the number of live replicas being less than
         the replication factor. --force should bypass this requirement.
@@ -455,22 +457,22 @@ class TestTopology(Tester):
         session = self.patient_cql_connection(node2)
         session.execute("ALTER KEYSPACE system_distributed WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'2'};")
         create_ks(session, 'ks', {'dc1': 2, 'dc2': 2})
-        with self.assertRaises(ToolError):
+        with pytest.raises(ToolError):
             node4.nodetool('decommission')
 
         session.execute('DROP KEYSPACE ks')
         create_ks(session, 'ks2', 4)
-        with self.assertRaises(ToolError):
+        with pytest.raises(ToolError):
             node4.nodetool('decommission')
 
         node4.nodetool('decommission --force')
         decommissioned = node4.watch_log_for("DECOMMISSIONED", timeout=120)
-        self.assertTrue(decommissioned, "Node failed to decommission when passed --force")
+        assert decommissioned, "Node failed to decommission when passed --force"
 
     def show_status(self, node):
         out, _, _ = node.nodetool('status')
-        debug("Status as reported by node {}".format(node.address()))
-        debug(out)
+        logger.debug("Status as reported by node {}".format(node.address()))
+        logger.debug(out)
         return out
 
 
@@ -486,8 +488,8 @@ class DecommissionInParallel(Thread):
         try:
             out, err, _ = node.nodetool("decommission")
             node.watch_log_for("DECOMMISSIONED", from_mark=mark)
-            debug(out)
-            debug(err)
+            logger.debug(out)
+            logger.debug(err)
         except ToolError as e:
-            debug("Decommission failed with exception: " + str(e))
+            logger.debug("Decommission failed with exception: " + str(e))
             pass

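The nose-style @no_vnodes() / @attr('resource-intensive') decorators above become plain pytest marks. Assuming the marks are registered in the project's pytest configuration (the conftest is not shown in this hunk), they can then be selected or deselected with standard -m mark expressions, e.g.:

    # skip tests marked resource_intensive
    pytest -m "not resource_intensive" topology_test.py

    # run only the tests that require a cluster without vnodes
    pytest -m no_vnodes topology_test.py
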
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/ttl_test.py
----------------------------------------------------------------------
diff --git a/ttl_test.py b/ttl_test.py
index 46df7b5..b7237d6 100644
--- a/ttl_test.py
+++ b/ttl_test.py
@@ -1,22 +1,29 @@
 import time
+import pytest
+import logging
+
 from collections import OrderedDict
 
 from cassandra import ConsistencyLevel
 from cassandra.query import SimpleStatement
 from cassandra.util import sortedset
 
-from dtest import Tester, debug, create_ks
+from dtest import Tester, create_ks
 from tools.assertions import (assert_all, assert_almost_equal, assert_none,
                               assert_row_count, assert_unavailable)
-from tools.decorators import since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 @since('2.0')
 class TestTTL(Tester):
     """ Test Time To Live Feature """
 
-    def setUp(self):
-        super(TestTTL, self).setUp()
+    @pytest.fixture(scope='function', autouse=True)
+    def fixture_ttl_test_setup(self, fixture_dtest_setup):
+        self.cluster = fixture_dtest_setup.cluster
+        self.fixture_dtest_setup = fixture_dtest_setup
         self.cluster.populate(1).start()
         [node1] = self.cluster.nodelist()
         self.session1 = self.patient_cql_connection(node1)
@@ -51,9 +58,8 @@ class TestTTL(Tester):
         if real_time_to_wait > 0:
             time.sleep(real_time_to_wait)
 
-    def default_ttl_test(self):
+    def test_default_ttl(self):
         """ Test default_time_to_live specified on a table """
-
         self.prepare(default_time_to_live=1)
         start = time.time()
         self.session1.execute("INSERT INTO ttl_table (key, col1) VALUES (%d, %d)" % (1, 1))
@@ -62,9 +68,8 @@ class TestTTL(Tester):
         self.smart_sleep(start, 3)
         assert_row_count(self.session1, 'ttl_table', 0)
 
-    def insert_ttl_has_priority_on_defaut_ttl_test(self):
+    def test_insert_ttl_has_priority_on_default_ttl(self):
         """ Test that a ttl specified during an insert has priority on the default table ttl """
-
         self.prepare(default_time_to_live=1)
 
         start = time.time()
@@ -76,9 +81,8 @@ class TestTTL(Tester):
         self.smart_sleep(start, 7)
         assert_row_count(self.session1, 'ttl_table', 0)
 
-    def insert_ttl_works_without_default_ttl_test(self):
+    def test_insert_ttl_works_without_default_ttl(self):
         """ Test that a ttl specified during an insert works even if a table has no default ttl """
-
         self.prepare()
 
         start = time.time()
@@ -88,9 +92,8 @@ class TestTTL(Tester):
         self.smart_sleep(start, 3)
         assert_row_count(self.session1, 'ttl_table', 0)
 
-    def default_ttl_can_be_removed_test(self):
+    def test_default_ttl_can_be_removed(self):
         """ Test that default_time_to_live can be removed """
-
         self.prepare(default_time_to_live=1)
 
         start = time.time()
@@ -101,9 +104,8 @@ class TestTTL(Tester):
         self.smart_sleep(start, 1.5)
         assert_row_count(self.session1, 'ttl_table', 1)
 
-    def removing_default_ttl_does_not_affect_existing_rows_test(self):
+    def test_removing_default_ttl_does_not_affect_existing_rows(self):
         """ Test that removing a default_time_to_live doesn't affect the existings rows """
-
         self.prepare(default_time_to_live=1)
 
         self.session1.execute("ALTER TABLE ttl_table WITH default_time_to_live = 10;")
@@ -123,9 +125,8 @@ class TestTTL(Tester):
         self.smart_sleep(start, 20)
         assert_row_count(self.session1, 'ttl_table', 1)
 
-    def update_single_column_ttl_test(self):
+    def test_update_single_column_ttl(self):
         """ Test that specifying a TTL on a single column works """
-
         self.prepare()
 
         self.session1.execute("""
@@ -137,9 +138,8 @@ class TestTTL(Tester):
         self.smart_sleep(start, 5)
         assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, None, 1, 1]])
 
-    def update_multiple_columns_ttl_test(self):
+    def test_update_multiple_columns_ttl(self):
         """ Test that specifying a TTL on multiple columns works """
-
         self.prepare()
 
         self.session1.execute("""
@@ -153,12 +153,11 @@ class TestTTL(Tester):
         self.smart_sleep(start, 4)
         assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, None, None, None]])
 
-    def update_column_ttl_with_default_ttl_test(self):
+    def test_update_column_ttl_with_default_ttl(self):
         """
         Test that specifying a column ttl works when a default ttl is set.
         This test specifies a lower ttl for the column than the default ttl.
         """
-
         self.prepare(default_time_to_live=8)
 
         start = time.time()
@@ -190,11 +189,10 @@ class TestTTL(Tester):
         self.smart_sleep(start, 8)
         assert_row_count(self.session1, 'ttl_table', 0)
 
-    def remove_column_ttl_test(self):
+    def test_remove_column_ttl(self):
         """
         Test that removing a column ttl works.
         """
-
         self.prepare()
 
         start = time.time()
@@ -206,12 +204,11 @@ class TestTTL(Tester):
         assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, 42, None, None]])
 
     @since('3.6')
-    def set_ttl_to_zero_to_default_ttl_test(self):
+    def test_set_ttl_to_zero_to_default_ttl(self):
         """
         Test that we can remove the default ttl by setting the ttl explicitly to zero.
         CASSANDRA-11207
         """
-
         self.prepare(default_time_to_live=2)
 
         start = time.time()
@@ -225,11 +222,10 @@ class TestTTL(Tester):
         assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, 42, None, None]])
 
     @since('2.1', max_version='3.5')
-    def remove_column_ttl_with_default_ttl_test(self):
+    def test_remove_column_ttl_with_default_ttl(self):
         """
         Test that we cannot remove a column ttl when a default ttl is set.
         """
-
         self.prepare(default_time_to_live=2)
 
         start = time.time()
@@ -247,11 +243,10 @@ class TestTTL(Tester):
         self.smart_sleep(start, 10)
         assert_row_count(self.session1, 'ttl_table', 0)
 
-    def collection_list_ttl_test(self):
+    def test_collection_list_ttl(self):
         """
         Test that ttl has a granularity of elements using a list collection.
         """
-
         self.prepare(default_time_to_live=10)
 
         self.session1.execute("ALTER TABLE ttl_table ADD mylist list<int>;""")
@@ -268,11 +263,10 @@ class TestTTL(Tester):
         self.smart_sleep(start, 12)
         assert_row_count(self.session1, 'ttl_table', 0)
 
-    def collection_set_ttl_test(self):
+    def test_collection_set_ttl(self):
         """
         Test that ttl has a granularity of elements using a set collection.
         """
-
         self.prepare(default_time_to_live=10)
 
         self.session1.execute("ALTER TABLE ttl_table ADD myset set<int>;""")
@@ -297,11 +291,10 @@ class TestTTL(Tester):
         self.smart_sleep(start, 12)
         assert_row_count(self.session1, 'ttl_table', 0)
 
-    def collection_map_ttl_test(self):
+    def test_collection_map_ttl(self):
         """
         Test that ttl has a granularity of elements using a map collection.
         """
-
         self.prepare(default_time_to_live=6)
 
         self.session1.execute("ALTER TABLE ttl_table ADD mymap map<int, int>;""")
@@ -326,7 +319,7 @@ class TestTTL(Tester):
         self.smart_sleep(start, 8)
         assert_row_count(self.session1, 'ttl_table', 0)
 
-    def delete_with_ttl_expired_test(self):
+    def test_delete_with_ttl_expired(self):
         """
         Updating a row with a ttl does not prevent deletion, test for CASSANDRA-6363
         """
@@ -344,13 +337,14 @@ class TestTTL(Tester):
 class TestDistributedTTL(Tester):
     """ Test Time To Live Feature in a distributed environment """
 
-    def setUp(self):
-        super(TestDistributedTTL, self).setUp()
-        self.cluster.populate(2).start()
-        [self.node1, self.node2] = self.cluster.nodelist()
-        self.session1 = self.patient_cql_connection(self.node1)
+    @pytest.fixture(scope='function', autouse=True)
+    def fixture_set_cluster_settings(self, fixture_dtest_setup):
+        fixture_dtest_setup.cluster.populate(2).start()
+        [self.node1, self.node2] = fixture_dtest_setup.cluster.nodelist()
+        self.session1 = fixture_dtest_setup.patient_cql_connection(self.node1)
         create_ks(self.session1, 'ks', 2)
 
     def prepare(self, default_time_to_live=None):
         self.session1.execute("DROP TABLE IF EXISTS ttl_table;")
         query = """
@@ -366,11 +360,10 @@ class TestDistributedTTL(Tester):
 
         self.session1.execute(query)
 
-    def ttl_is_replicated_test(self):
+    def test_ttl_is_replicated(self):
         """
         Test that the ttl setting is replicated properly on all nodes
         """
-
         self.prepare(default_time_to_live=5)
         session1 = self.patient_exclusive_cql_connection(self.node1)
         session2 = self.patient_exclusive_cql_connection(self.node2)
@@ -392,15 +385,14 @@ class TestDistributedTTL(Tester):
 
         # since the two queries are not executed simultaneously, the remaining
         # TTLs can differ by one second
-        self.assertLessEqual(abs(ttl_session1[0][0] - ttl_session2[0][0]), 1)
+        assert abs(ttl_session1[0][0] - ttl_session2[0][0]) <= 1
 
         time.sleep(7)
 
         assert_none(session1, "SELECT * FROM ttl_table;", cl=ConsistencyLevel.ALL)
 
-    def ttl_is_respected_on_delayed_replication_test(self):
+    def test_ttl_is_respected_on_delayed_replication(self):
         """ Test that ttl is respected on delayed replication """
-
         self.prepare()
         self.node2.stop()
         self.session1.execute("""
@@ -437,13 +429,12 @@ class TestDistributedTTL(Tester):
         ttl_1 = self.session1.execute('SELECT ttl(col1) FROM ttl_table;')[0][0]
         ttl_2 = session2.execute('SELECT ttl(col1) FROM ttl_table;')[0][0]
 
-        debug("ttl_1 is {}:".format(ttl_1))
-        debug("ttl_2 is {}:".format(ttl_2))
-        self.assertLessEqual(abs(ttl_1 - ttl_2), 1)
+        logger.debug("ttl_1 is {}:".format(ttl_1))
+        logger.debug("ttl_2 is {}:".format(ttl_2))
+        assert abs(ttl_1 - ttl_2) <= 1
 
-    def ttl_is_respected_on_repair_test(self):
+    def test_ttl_is_respected_on_repair(self):
         """ Test that ttl is respected on repair """
-
         self.prepare()
         self.session1.execute("""
             ALTER KEYSPACE ks WITH REPLICATION =

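The TestTTL and TestDistributedTTL hunks above show the general recipe for the setup migration: unittest's setUp() becomes a function-scoped autouse fixture that receives fixture_dtest_setup. Schematically, with illustrative class and keyspace names (a sketch, not part of the commit):

    import pytest

    from dtest import Tester, create_ks

    class TestExample(Tester):

        @pytest.fixture(scope='function', autouse=True)
        def fixture_example_setup(self, fixture_dtest_setup):
            # Runs before every test method, like unittest's setUp().
            self.cluster = fixture_dtest_setup.cluster
            self.cluster.populate(1).start()
            node1 = self.cluster.nodelist()[0]
            self.session = fixture_dtest_setup.patient_cql_connection(node1)
            create_ks(self.session, 'ks', 1)
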
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/udtencoding_test.py
----------------------------------------------------------------------
diff --git a/udtencoding_test.py b/udtencoding_test.py
index 59c0e48..eb5929e 100644
--- a/udtencoding_test.py
+++ b/udtencoding_test.py
@@ -1,12 +1,15 @@
 import time
+import logging
 
 from tools.assertions import assert_invalid
 from dtest import Tester, create_ks
 
+logger = logging.getLogger(__name__)
+
 
 class TestUDTEncoding(Tester):
 
-    def udt_test(self):
+    def test_udt(self):
         """ Test (somewhat indirectly) that user queries involving UDT's are properly encoded (due to driver not recognizing UDT syntax) """
         cluster = self.cluster
 

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/upgrade_crc_check_chance_test.py
----------------------------------------------------------------------
diff --git a/upgrade_crc_check_chance_test.py b/upgrade_crc_check_chance_test.py
index ec758c2..3ad1b59 100644
--- a/upgrade_crc_check_chance_test.py
+++ b/upgrade_crc_check_chance_test.py
@@ -1,21 +1,26 @@
-from unittest import skipIf
+import pytest
+import logging
 
-from dtest import OFFHEAP_MEMTABLES, Tester, debug
+from dtest import Tester
 from tools.assertions import assert_crc_check_chance_equal, assert_one
-from tools.decorators import since
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
+
+@pytest.mark.upgrade_test
 @since('3.0')
 class TestCrcCheckChanceUpgrade(Tester):
-    ignore_log_patterns = (
-        # This one occurs if we do a non-rolling upgrade, the node
-        # it's trying to send the migration to hasn't started yet,
-        # and when it does, it gets replayed and everything is fine.
-        r'Can\'t send migration request: node.*is down',
-    )
-
-    @skipIf(OFFHEAP_MEMTABLES, 'offheap_objects are not available in 3.0')
-    def crc_check_chance_upgrade_test(self):
+
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        fixture_dtest_setup.ignore_log_patterns = (
+            # This one occurs if we do a non-rolling upgrade, the node
+            # it's trying to send the migration to hasn't started yet,
+            # and when it does, it gets replayed and everything is fine.
+            r'Can\'t send migration request: node.*is down',
+        )
+
+    @pytest.mark.no_offheap_memtables
+    def test_crc_check_chance_upgrade(self):
         """
         Tests behavior of compression property crc_check_chance after upgrade to 3.0,
         when it was promoted to a top-level property
@@ -93,46 +98,46 @@ class TestCrcCheckChanceUpgrade(Tester):
         assert_one(session, "SELECT * FROM ks.cf1 WHERE id=7", [7, 0])
         session.shutdown()
 
-        debug('Test completed successfully')
+        logger.debug('Test completed successfully')
 
     def verify_old_crc_check_chance(self, node):
         session = self.patient_exclusive_cql_connection(node)
         session.cluster.refresh_schema_metadata(0)
         meta = session.cluster.metadata.keyspaces['ks'].tables['cf1']
-        debug(meta.options['compression_parameters'])
-        self.assertEqual('{"crc_check_chance":"0.6","sstable_compression":"org.apache.cassandra.io.compress.DeflateCompressor","chunk_length_kb":"256"}',
-                         meta.options['compression_parameters'])
+        logger.debug(meta.options['compression_parameters'])
+        assert '{"crc_check_chance":"0.6","sstable_compression":"org.apache.cassandra.io.compress.DeflateCompressor","chunk_length_kb":"256"}' \
+               == meta.options['compression_parameters']
         session.shutdown()
 
     def verify_new_crc_check_chance(self, node):
         session = self.patient_exclusive_cql_connection(node)
         session.cluster.refresh_schema_metadata(0)
         meta = session.cluster.metadata.keyspaces['ks'].tables['cf1']
-        self.assertEqual('org.apache.cassandra.io.compress.DeflateCompressor', meta.options['compression']['class'])
-        self.assertEqual('256', meta.options['compression']['chunk_length_in_kb'])
+        assert 'org.apache.cassandra.io.compress.DeflateCompressor' == meta.options['compression']['class']
+        assert '256' == meta.options['compression']['chunk_length_in_kb']
         assert_crc_check_chance_equal(session, "cf1", 0.6)
         session.shutdown()
 
     def upgrade_to_version(self, tag, node):
         format_args = {'node': node.name, 'tag': tag}
-        debug('Upgrading node {node} to {tag}'.format(**format_args))
+        logger.debug('Upgrading node {node} to {tag}'.format(**format_args))
         # drain and shutdown
         node.drain()
         node.watch_log_for("DRAINED")
         node.stop(wait_other_notice=False)
-        debug('{node} stopped'.format(**format_args))
+        logger.debug('{node} stopped'.format(**format_args))
 
         # Update Cassandra Directory
-        debug('Updating version to tag {tag}'.format(**format_args))
+        logger.debug('Updating version to tag {tag}'.format(**format_args))
 
-        debug('Set new cassandra dir for {node}: {tag}'.format(**format_args))
+        logger.debug('Set new cassandra dir for {node}: {tag}'.format(**format_args))
         node.set_install_dir(version='git:' + tag, verbose=True)
         # Restart node on new version
-        debug('Starting {node} on new version ({tag})'.format(**format_args))
+        logger.debug('Starting {node} on new version ({tag})'.format(**format_args))
         # Setup log4j / logback again (necessary moving from 2.0 -> 2.1):
         node.set_log_level("INFO")
         node.start(wait_other_notice=True, wait_for_binary_proto=True)
 
-        debug('Running upgradesstables')
+        logger.debug('Running upgradesstables')
         node.nodetool('upgradesstables -a')
-        debug('Upgrade of {node} complete'.format(**format_args))
+        logger.debug('Upgrade of {node} complete'.format(**format_args))

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/upgrade_internal_auth_test.py
----------------------------------------------------------------------
diff --git a/upgrade_internal_auth_test.py b/upgrade_internal_auth_test.py
index 62acac9..a4de1ca 100644
--- a/upgrade_internal_auth_test.py
+++ b/upgrade_internal_auth_test.py
@@ -1,33 +1,47 @@
 import time
-from unittest import skipIf
+import pytest
+import logging
 
 from cassandra import Unauthorized
 from ccmlib.common import is_win
 from ccmlib.node import Node
 
-from dtest import OFFHEAP_MEMTABLES, Tester, debug
+from dtest_setup_overrides import DTestSetupOverrides
+
+from dtest import Tester
 from tools.assertions import assert_all, assert_invalid
-from tools.decorators import since
 from tools.misc import ImmutableMapping
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
 
+@pytest.mark.upgrade_test
 @since('2.2')
 class TestAuthUpgrade(Tester):
-    cluster_options = ImmutableMapping({'authenticator': 'PasswordAuthenticator',
-                                        'authorizer': 'CassandraAuthorizer'})
-    ignore_log_patterns = (
-        # This one occurs if we do a non-rolling upgrade, the node
-        # it's trying to send the migration to hasn't started yet,
-        # and when it does, it gets replayed and everything is fine.
-        r'Can\'t send migration request: node.*is down',
+
+    @pytest.fixture(scope='function', autouse=True)
+    def fixture_dtest_setup_overrides(self):
+        dtest_setup_overrides = DTestSetupOverrides()
+        dtest_setup_overrides.cluster_options = ImmutableMapping(
+            {'authenticator': 'PasswordAuthenticator',
+             'authorizer': 'CassandraAuthorizer'})
+        return dtest_setup_overrides
+
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        fixture_dtest_setup.ignore_log_patterns = (
+            # This one occurs if we do a non-rolling upgrade, the node
+            # it's trying to send the migration to hasn't started yet,
+            # and when it does, it gets replayed and everything is fine.
+            r'Can\'t send migration request: node.*is down',
-    )
+        )
 
-    def upgrade_to_22_test(self):
+    def test_upgrade_to_22(self):
         self.do_upgrade_with_internal_auth("github:apache/cassandra-2.2")
 
     @since('3.0')
-    @skipIf(OFFHEAP_MEMTABLES, 'offheap_objects are not available in 3.0')
-    def upgrade_to_30_test(self):
+    @pytest.mark.no_offheap_memtables
+    def test_upgrade_to_30(self):
         self.do_upgrade_with_internal_auth("github:apache/cassandra-3.0")
 
     @since('2.2', max_version='3.X')
@@ -72,8 +86,10 @@ class TestAuthUpgrade(Tester):
 
         replacement_address = node1.address()
         replacement_node = Node('replacement', cluster=self.cluster, auto_bootstrap=True,
-                                thrift_interface=(replacement_address, 9160), storage_interface=(replacement_address, 7000),
-                                jmx_port='7400', remote_debug_port='0', initial_token=None, binary_interface=(replacement_address, 9042))
+                                thrift_interface=(replacement_address, 9160),
+                                storage_interface=(replacement_address, 7000),
+                                jmx_port='7400', remote_debug_port='0', initial_token=None,
+                                binary_interface=(replacement_address, 9042))
         self.set_node_to_current_version(replacement_node)
 
         cluster.add(replacement_node, True)
@@ -150,7 +166,7 @@ class TestAuthUpgrade(Tester):
         session.execute('DROP TABLE system_auth.permissions', timeout=60)
         # and we should still be able to authenticate and check authorization
         self.check_permissions(node1, True)
-        debug('Test completed successfully')
+        logger.debug('Test completed successfully')
 
     def check_permissions(self, node, upgraded):
         # use an exclusive connection to ensure we only talk to the specified node
@@ -185,32 +201,32 @@ class TestAuthUpgrade(Tester):
 
     def upgrade_to_version(self, tag, node):
         format_args = {'node': node.name, 'tag': tag}
-        debug('Upgrading node {node} to {tag}'.format(**format_args))
+        logger.debug('Upgrading node {node} to {tag}'.format(**format_args))
         # drain and shutdown
         node.drain()
         node.watch_log_for("DRAINED")
         node.stop(wait_other_notice=False)
-        debug('{node} stopped'.format(**format_args))
+        logger.debug('{node} stopped'.format(**format_args))
 
         # Ignore errors before upgrade on Windows
         if is_win():
             node.mark_log_for_errors()
 
         # Update Cassandra Directory
-        debug('Updating version to tag {tag}'.format(**format_args))
+        logger.debug('Updating version to tag {tag}'.format(**format_args))
         node.set_install_dir(version=tag, verbose=True)
-        debug('Set new cassandra dir for {node}: {tag}'.format(**format_args))
+        logger.debug('Set new cassandra dir for {node}: {tag}'.format(**format_args))
 
         # Restart node on new version
-        debug('Starting {node} on new version ({tag})'.format(**format_args))
+        logger.debug('Starting {node} on new version ({tag})'.format(**format_args))
         # Setup log4j / logback again (necessary moving from 2.0 -> 2.1):
         node.set_log_level("INFO")
         node.start(wait_other_notice=True)
         # wait for the conversion of legacy data to either complete or fail
         # (because not enough upgraded nodes are available yet)
-        debug('Waiting for conversion of legacy data to complete or fail')
+        logger.debug('Waiting for conversion of legacy data to complete or fail')
         node.watch_log_for('conversion of legacy permissions')
 
-        debug('Running upgradesstables')
+        logger.debug('Running upgradesstables')
         node.nodetool('upgradesstables -a')
-        debug('Upgrade of {node} complete'.format(**format_args))
+        logger.debug('Upgrade of {node} complete'.format(**format_args))

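The fixture_dtest_setup_overrides hook above replaces the old class-level cluster_options attribute. A minimal sketch of the new pattern for supplying cluster options (the class name and option values are illustrative):

    import pytest

    from dtest import Tester
    from dtest_setup_overrides import DTestSetupOverrides
    from tools.misc import ImmutableMapping

    class TestWithAuth(Tester):

        @pytest.fixture(scope='function', autouse=True)
        def fixture_dtest_setup_overrides(self):
            # The returned overrides are consumed by the dtest setup machinery
            # before the cluster is built.
            overrides = DTestSetupOverrides()
            overrides.cluster_options = ImmutableMapping(
                {'authenticator': 'PasswordAuthenticator'})
            return overrides
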
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/upgrade_tests/bootstrap_upgrade_test.py
----------------------------------------------------------------------
diff --git a/upgrade_tests/bootstrap_upgrade_test.py b/upgrade_tests/bootstrap_upgrade_test.py
index 43f735a..efe8ae4 100644
--- a/upgrade_tests/bootstrap_upgrade_test.py
+++ b/upgrade_tests/bootstrap_upgrade_test.py
@@ -1,9 +1,12 @@
-from bootstrap_test import BaseBootstrapTest
-from tools.decorators import since, no_vnodes
+import pytest
 
+from bootstrap_test import TestBootstrap
 
-class TestBootstrapUpgrade(BaseBootstrapTest):
-    __test__ = True
+since = pytest.mark.since
+
+
+@pytest.mark.upgrade_test
+class TestBootstrapUpgrade(TestBootstrap):
 
     """
     @jira_ticket CASSANDRA-11841
@@ -11,7 +14,7 @@ class TestBootstrapUpgrade(BaseBootstrapTest):
     In particular, we want to test that keep-alive is not sent
     to a node with version < 3.10
     """
-    @no_vnodes()
+    @pytest.mark.no_vnodes
     @since('3.10', max_version='3.99')
-    def simple_bootstrap_test_mixed_versions(self):
+    def test_simple_bootstrap_mixed_versions(self):
         self._base_bootstrap_test(bootstrap_from_version="3.5")




[23/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/paging_test.py
----------------------------------------------------------------------
diff --git a/paging_test.py b/paging_test.py
index 24a4739..d41b8f3 100644
--- a/paging_test.py
+++ b/paging_test.py
@@ -1,5 +1,10 @@
 import time
 import uuid
+import pytest
+import logging
+
+from flaky import flaky
+
 from distutils.version import LooseVersion
 
 from cassandra import ConsistencyLevel as CL
@@ -8,19 +13,21 @@ from cassandra.policies import FallthroughRetryPolicy
 from cassandra.query import (SimpleStatement, dict_factory,
                              named_tuple_factory, tuple_factory)
 
-from dtest import Tester, debug, run_scenarios, create_ks, supports_v5_protocol
+from dtest import Tester, run_scenarios, create_ks
 from tools.assertions import (assert_all, assert_invalid, assert_length_equal,
-                              assert_one)
+                              assert_one, assert_lists_equal_ignoring_order)
 from tools.data import rows_to_list
 from tools.datahelp import create_rows, flatten_into_set, parse_data_into_dicts
-from tools.decorators import since
 from tools.paging import PageAssertionMixin, PageFetcher
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
 
 class BasePagingTester(Tester):
 
     def prepare(self, row_factory=dict_factory):
-        supports_v5 = supports_v5_protocol(self.cluster.version())
+        supports_v5 = self.supports_v5_protocol(self.cluster.version())
         protocol_version = 5 if supports_v5 else None
         cluster = self.cluster
         cluster.populate(3).start(wait_for_binary_proto=True)
@@ -54,8 +61,8 @@ class TestPagingSize(BasePagingTester, PageAssertionMixin):
 
         pf = PageFetcher(future)
         pf.request_all()
-        self.assertEqual([], pf.all_data())
-        self.assertFalse(pf.has_more_pages)
+        assert [] == pf.all_data()
+        assert not pf.has_more_pages
 
     def test_with_less_results_than_page_size(self):
         session = self.prepare()
@@ -71,7 +78,7 @@ class TestPagingSize(BasePagingTester, PageAssertionMixin):
             |4 |and more testing|
             |5 |and more testing|
             """
-        expected_data = create_rows(data, session, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': unicode})
+        expected_data = create_rows(data, session, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': str})
 
         future = session.execute_async(
             SimpleStatement("select * from paging_test", fetch_size=100, consistency_level=CL.ALL)
@@ -79,8 +86,8 @@ class TestPagingSize(BasePagingTester, PageAssertionMixin):
         pf = PageFetcher(future)
         pf.request_all()
 
-        self.assertFalse(pf.has_more_pages)
-        self.assertEqual(len(expected_data), len(pf.all_data()))
+        assert not pf.has_more_pages
+        assert len(expected_data) == len(pf.all_data())
 
     def test_with_more_results_than_page_size(self):
         session = self.prepare()
@@ -100,7 +107,7 @@ class TestPagingSize(BasePagingTester, PageAssertionMixin):
             |8 |and more testing|
             |9 |and more testing|
             """
-        expected_data = create_rows(data, session, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': unicode})
+        expected_data = create_rows(data, session, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': str})
 
         future = session.execute_async(
             SimpleStatement("select * from paging_test", fetch_size=5, consistency_level=CL.ALL)
@@ -108,11 +115,11 @@ class TestPagingSize(BasePagingTester, PageAssertionMixin):
 
         pf = PageFetcher(future).request_all()
 
-        self.assertEqual(pf.pagecount(), 2)
-        self.assertEqual(pf.num_results_all(), [5, 4])
+        assert pf.pagecount() == 2
+        assert pf.num_results_all() == [5, 4]
 
         # make sure expected and actual have same data elements (ignoring order)
-        self.assertEqualIgnoreOrder(pf.all_data(), expected_data)
+        assert_lists_equal_ignoring_order(expected_data, pf.all_data(), sort_key="id")
 
     def test_with_equal_results_to_page_size(self):
         session = self.prepare()
@@ -128,7 +135,7 @@ class TestPagingSize(BasePagingTester, PageAssertionMixin):
             |4 |and more testing|
             |5 |and more testing|
             """
-        expected_data = create_rows(data, session, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': unicode})
+        expected_data = create_rows(data, session, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': str})
 
         future = session.execute_async(
             SimpleStatement("select * from paging_test", fetch_size=5, consistency_level=CL.ALL)
@@ -136,11 +143,11 @@ class TestPagingSize(BasePagingTester, PageAssertionMixin):
 
         pf = PageFetcher(future).request_all()
 
-        self.assertEqual(pf.num_results_all(), [5])
-        self.assertEqual(pf.pagecount(), 1)
+        assert pf.num_results_all() == [5]
+        assert pf.pagecount() == 1
 
         # make sure expected and actual have same data elements (ignoring order)
-        self.assertEqualIgnoreOrder(pf.all_data(), expected_data)
+        assert_lists_equal_ignoring_order(expected_data, pf.all_data(), sort_key="id")
 
     def test_undefined_page_size_default(self):
         """
@@ -158,7 +165,7 @@ class TestPagingSize(BasePagingTester, PageAssertionMixin):
                +--------+--------+
           *5001| [uuid] |testing |
             """
-        expected_data = create_rows(data, session, 'paging_test', cl=CL.ALL, format_funcs={'id': random_txt, 'value': unicode})
+        expected_data = create_rows(data, session, 'paging_test', cl=CL.ALL, format_funcs={'id': random_txt, 'value': str})
 
         future = session.execute_async(
             SimpleStatement("select * from paging_test", consistency_level=CL.ALL)
@@ -166,10 +173,10 @@ class TestPagingSize(BasePagingTester, PageAssertionMixin):
 
         pf = PageFetcher(future).request_all()
 
-        self.assertEqual(pf.num_results_all(), [5000, 1])
+        assert pf.num_results_all() == [5000, 1]
 
         # make sure expected and actual have same data elements (ignoring order)
-        self.assertEqualIgnoreOrder(pf.all_data(), expected_data)
+        assert_lists_equal_ignoring_order(expected_data, pf.all_data(), sort_key="id")
 
 
 @since('2.0')
@@ -209,7 +216,7 @@ class TestPagingWithModifiers(BasePagingTester, PageAssertionMixin):
             |1 |j    |
             """
 
-        expected_data = create_rows(data, session, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': unicode})
+        expected_data = create_rows(data, session, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': str})
 
         future = session.execute_async(
             SimpleStatement("select * from paging_test where id = 1 order by value asc", fetch_size=5, consistency_level=CL.ALL)
@@ -217,14 +224,14 @@ class TestPagingWithModifiers(BasePagingTester, PageAssertionMixin):
 
         pf = PageFetcher(future).request_all()
 
-        self.assertEqual(pf.pagecount(), 2)
-        self.assertEqual(pf.num_results_all(), [5, 5])
+        assert pf.pagecount() == 2
+        assert pf.num_results_all() == [5, 5]
 
         # these should be equal (in the same order)
-        self.assertEqual(pf.all_data(), expected_data)
+        assert pf.all_data() == expected_data
 
         # make sure we don't allow paging over multiple partitions with order because that's weird
-        with self.assertRaisesRegexp(InvalidRequest, 'Cannot page queries with both ORDER BY and a IN restriction on the partition key'):
+        with pytest.raises(InvalidRequest, match='Cannot page queries with both ORDER BY and a IN restriction on the partition key'):
             stmt = SimpleStatement("select * from paging_test where id in (1,2) order by value asc", consistency_level=CL.ALL)
             session.execute(stmt)
 
@@ -259,7 +266,7 @@ class TestPagingWithModifiers(BasePagingTester, PageAssertionMixin):
             |1 |j    |j     |
             """
 
-        expected_data = create_rows(data, session, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': unicode, 'value2': unicode})
+        expected_data = create_rows(data, session, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': str, 'value2': str})
 
         future = session.execute_async(
             SimpleStatement("select * from paging_test where id = 1 order by value asc", fetch_size=3, consistency_level=CL.ALL)
@@ -267,11 +274,11 @@ class TestPagingWithModifiers(BasePagingTester, PageAssertionMixin):
 
         pf = PageFetcher(future).request_all()
 
-        self.assertEqual(pf.pagecount(), 4)
-        self.assertEqual(pf.num_results_all(), [3, 3, 3, 1])
+        assert pf.pagecount() == 4
+        assert pf.num_results_all() == [3, 3, 3, 1]
 
         # these should be equal (in the same order)
-        self.assertEqual(pf.all_data(), expected_data)
+        assert pf.all_data() == expected_data
 
         # drop the ORDER BY
         future = session.execute_async(
@@ -280,11 +287,11 @@ class TestPagingWithModifiers(BasePagingTester, PageAssertionMixin):
 
         pf = PageFetcher(future).request_all()
 
-        self.assertEqual(pf.pagecount(), 4)
-        self.assertEqual(pf.num_results_all(), [3, 3, 3, 1])
+        assert pf.pagecount() == 4
+        assert pf.num_results_all() == [3, 3, 3, 1]
 
         # these should be equal (in the same order)
-        self.assertEqual(pf.all_data(), list(reversed(expected_data)))
+        assert pf.all_data() == list(reversed(expected_data))
 
     def test_with_limit(self):
         session = self.prepare()
@@ -292,7 +299,7 @@ class TestPagingWithModifiers(BasePagingTester, PageAssertionMixin):
         session.execute("CREATE TABLE paging_test ( id int, value text, PRIMARY KEY (id, value) )")
 
         def random_txt(text):
-            return unicode(uuid.uuid4())
+            return str(uuid.uuid4())
 
         data = """
                | id | value         |
@@ -361,8 +368,8 @@ class TestPagingWithModifiers(BasePagingTester, PageAssertionMixin):
                 self.fail("Invalid scenario configuration. Scenario is: {}".format(scenario))
 
             pf = PageFetcher(future).request_all()
-            self.assertEqual(pf.num_results_all(), scenario['expect_pgsizes'])
-            self.assertEqual(pf.pagecount(), scenario['expect_pgcount'])
+            assert pf.num_results_all() == scenario['expect_pgsizes']
+            assert pf.pagecount() == scenario['expect_pgcount']
 
             # make sure all the data retrieved is a subset of input data
             self.assertIsSubsetOf(pf.all_data(), expected_data)
@@ -387,7 +394,7 @@ class TestPagingWithModifiers(BasePagingTester, PageAssertionMixin):
             |8 |and more testing|
             |9 |and more testing|
             """
-        create_rows(data, session, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': unicode})
+        create_rows(data, session, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': str})
 
         future = session.execute_async(
             SimpleStatement("select * from paging_test where value = 'and more testing' ALLOW FILTERING", fetch_size=4, consistency_level=CL.ALL)
@@ -395,26 +402,24 @@ class TestPagingWithModifiers(BasePagingTester, PageAssertionMixin):
 
         pf = PageFetcher(future).request_all()
 
-        self.assertEqual(pf.pagecount(), 2)
-        self.assertEqual(pf.num_results_all(), [4, 3])
+        assert pf.pagecount() == 2
+        assert pf.num_results_all() == [4, 3]
 
         # make sure the allow filtering query matches the expected results (ignoring order)
-        self.assertEqualIgnoreOrder(
-            pf.all_data(),
-            parse_data_into_dicts(
-                """
-                |id|value           |
-                +--+----------------+
-                |2 |and more testing|
-                |3 |and more testing|
-                |4 |and more testing|
-                |5 |and more testing|
-                |7 |and more testing|
-                |8 |and more testing|
-                |9 |and more testing|
-                """, format_funcs={'id': int, 'value': unicode}
-            )
+        expected_data = parse_data_into_dicts(
+            """
+            |id|value           |
+            +--+----------------+
+            |2 |and more testing|
+            |3 |and more testing|
+            |4 |and more testing|
+            |5 |and more testing|
+            |7 |and more testing|
+            |8 |and more testing|
+            |9 |and more testing|
+            """, format_funcs={'id': int, 'value': str}
         )
+        assert_lists_equal_ignoring_order(expected_data, pf.all_data(), sort_key="value")
 
 
 @since('2.0')
@@ -426,7 +431,7 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
         session.execute("CREATE TABLE paging_test ( id int, value text, PRIMARY KEY (id, value) )")
 
         def random_txt(text):
-            return unicode(uuid.uuid4())
+            return str(uuid.uuid4())
 
         data = """
               | id | value                  |
@@ -441,10 +446,9 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
 
         pf = PageFetcher(future).request_all()
 
-        self.assertEqual(pf.pagecount(), 4)
-        self.assertEqual(pf.num_results_all(), [3000, 3000, 3000, 1000])
-
-        self.assertEqualIgnoreOrder(pf.all_data(), expected_data)
+        assert pf.pagecount() == 4
+        assert pf.num_results_all() == [3000, 3000, 3000, 1000]
+        assert_lists_equal_ignoring_order(expected_data, pf.all_data(), sort_key="value")
 
     def test_paging_across_multi_wide_rows(self):
         session = self.prepare()
@@ -452,7 +456,7 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
         session.execute("CREATE TABLE paging_test ( id int, value text, PRIMARY KEY (id, value) )")
 
         def random_txt(text):
-            return unicode(uuid.uuid4())
+            return str(uuid.uuid4())
 
         data = """
               | id | value                  |
@@ -468,10 +472,9 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
 
         pf = PageFetcher(future).request_all()
 
-        self.assertEqual(pf.pagecount(), 4)
-        self.assertEqual(pf.num_results_all(), [3000, 3000, 3000, 1000])
-
-        self.assertEqualIgnoreOrder(pf.all_data(), expected_data)
+        assert pf.pagecount() == 4
+            assert pf.num_results_all() == [3000, 3000, 3000, 1000]
+        assert_lists_equal_ignoring_order(expected_data, pf.all_data(), sort_key="value")
 
     def test_paging_using_secondary_indexes(self):
         session = self.prepare()
@@ -480,7 +483,7 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
         session.execute("CREATE INDEX ON paging_test(mybool)")
 
         def random_txt(text):
-            return unicode(uuid.uuid4())
+            return str(uuid.uuid4())
 
         def bool_from_str_int(text):
             return bool(int(text))
@@ -505,11 +508,11 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
         pf = PageFetcher(future).request_all()
 
         # the query only searched for True rows, so let's pare down the expectations for comparison
-        expected_data = filter(lambda x: x.get('mybool') is True, all_data)
+        expected_data = [x for x in all_data if x.get('mybool') is True]
 
-        self.assertEqual(pf.pagecount(), 2)
-        self.assertEqual(pf.num_results_all(), [400, 200])
-        self.assertEqualIgnoreOrder(expected_data, pf.all_data())
+        assert pf.pagecount() == 2
+        assert pf.num_results_all() == [400, 200]
+        assert_lists_equal_ignoring_order(expected_data, pf.all_data(), sort_key="sometext")
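
The pagecount() and num_results_all() assertions depend on PageFetcher having
drained every page of the ResponseFuture. A rough synchronous equivalent of
that loop, assuming only the stock python-driver ResultSet paging API
(current_rows, has_more_pages, fetch_next_page); the real PageFetcher used by
these tests works on the async future, so this is a sketch rather than its
implementation:

    from cassandra.query import SimpleStatement

    def fetch_all_pages(session, query, fetch_size):
        # Collect each page's rows separately: len(pages) corresponds to
        # pagecount(), and [len(p) for p in pages] to num_results_all().
        pages = []
        results = session.execute(SimpleStatement(query, fetch_size=fetch_size))
        while True:
            pages.append(list(results.current_rows))
            if not results.has_more_pages:
                break
            results.fetch_next_page()
        return pages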
 
     def test_paging_with_in_orderby_and_two_partition_keys(self):
         session = self.prepare()
@@ -520,11 +523,10 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
         assert_invalid(session, "select * from paging_test where col_2 IN (1, 2) and col_1=1 order by col_3 desc;", expected=InvalidRequest)
 
     @since('3.10')
-    def group_by_paging_test(self):
+    def test_group_by_paging(self):
         """
         @jira_ticket CASSANDRA-10707
         """
-
         session = self.prepare(row_factory=tuple_factory)
         create_ks(session, 'test_paging_with_group_by', 2)
         session.execute("CREATE TABLE test (a int, b int, c int, d int, e int, primary key (a, b, c, d))")
@@ -548,333 +550,332 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
 
             # Range queries
             res = rows_to_list(session.execute("SELECT a, b, e, count(b), max(e) FROM test GROUP BY a"))
-            self.assertEqual(res, [[1, 2, 6, 4L, 24], [2, 2, 6, 2L, 12], [4, 8, 24, 1L, 24]])
+            assert res == [[1, 2, 6, 4, 24], [2, 2, 6, 2, 12], [4, 8, 24, 1, 24]]
 
             res = rows_to_list(session.execute("SELECT a, b, e, count(b), max(e) FROM test GROUP BY a, b"))
-            self.assertEqual(res, [[1, 2, 6, 2L, 12],
-                                   [1, 4, 12, 2L, 24],
-                                   [2, 2, 6, 1L, 6],
-                                   [2, 4, 12, 1L, 12],
-                                   [4, 8, 24, 1L, 24]])
+            assert res == [[1, 2, 6, 2, 12],
+                           [1, 4, 12, 2, 24],
+                           [2, 2, 6, 1, 6],
+                           [2, 4, 12, 1, 12],
+                           [4, 8, 24, 1, 24]]
 
             res = rows_to_list(session.execute("SELECT a, b, e, count(b), max(e) FROM test"))
-            self.assertEqual(res, [[1, 2, 6, 7L, 24]])
+            assert res == [[1, 2, 6, 7, 24]]
 
             res = rows_to_list(
                 session.execute("SELECT a, b, e, count(b), max(e) FROM test WHERE b = 2 GROUP BY a, b ALLOW FILTERING"))
-            self.assertEqual(res, [[1, 2, 6, 2L, 12],
-                                   [2, 2, 6, 1L, 6]])
+            assert res == [[1, 2, 6, 2, 12],
+                           [2, 2, 6, 1, 6]]
 
             assert_invalid(session, "SELECT a, b, e, count(b), max(e) FROM test WHERE b = 2 GROUP BY a, b;", expected=InvalidRequest)
 
             res = rows_to_list(
                 session.execute("SELECT a, b, e, count(b), max(e) FROM test WHERE b = 2 ALLOW FILTERING"))
-            self.assertEqual(res, [[1, 2, 6, 3L, 12]])
+            assert res == [[1, 2, 6, 3, 12]]
 
             assert_invalid(session, "SELECT a, b, e, count(b), max(e) FROM test WHERE b = 2", expected=InvalidRequest)
 
             # Range queries without aggregates
             res = rows_to_list(session.execute("SELECT a, b, c, d FROM test GROUP BY a, b, c"))
-            self.assertEqual(res, [[1, 2, 1, 3],
-                                   [1, 2, 2, 6],
-                                   [1, 4, 2, 6],
-                                   [2, 2, 3, 3],
-                                   [2, 4, 3, 6],
-                                   [4, 8, 2, 12]])
+            assert res == [[1, 2, 1, 3],
+                           [1, 2, 2, 6],
+                           [1, 4, 2, 6],
+                           [2, 2, 3, 3],
+                           [2, 4, 3, 6],
+                           [4, 8, 2, 12]]
 
             res = rows_to_list(session.execute("SELECT a, b, c, d FROM test GROUP BY a, b"))
-            self.assertEqual(res, [[1, 2, 1, 3],
-                                   [1, 4, 2, 6],
-                                   [2, 2, 3, 3],
-                                   [2, 4, 3, 6],
-                                   [4, 8, 2, 12]])
+            assert res == [[1, 2, 1, 3],
+                           [1, 4, 2, 6],
+                           [2, 2, 3, 3],
+                           [2, 4, 3, 6],
+                           [4, 8, 2, 12]]
 
             # Range query with LIMIT
             res = rows_to_list(session.execute("SELECT a, b, e, count(b), max(e) FROM test GROUP BY a, b LIMIT 2"))
-            self.assertEqual(res, [[1, 2, 6, 2L, 12],
-                                   [1, 4, 12, 2L, 24]])
+            assert res == [[1, 2, 6, 2, 12],
+                           [1, 4, 12, 2, 24]]
 
             res = rows_to_list(session.execute("SELECT a, b, e, count(b), max(e) FROM test LIMIT 2"))
-            self.assertEqual(res, [[1, 2, 6, 7L, 24]])
+            assert res == [[1, 2, 6, 7, 24]]
 
             # Range queries without aggregates and with LIMIT
             res = rows_to_list(session.execute("SELECT a, b, c, d FROM test GROUP BY a, b, c LIMIT 3"))
-            self.assertEqual(res, [[1, 2, 1, 3],
-                                   [1, 2, 2, 6],
-                                   [1, 4, 2, 6]])
+            assert res == [[1, 2, 1, 3],
+                           [1, 2, 2, 6],
+                           [1, 4, 2, 6]]
 
             res = rows_to_list(session.execute("SELECT a, b, c, d FROM test GROUP BY a, b LIMIT 3"))
-            self.assertEqual(res, [[1, 2, 1, 3],
-                                   [1, 4, 2, 6],
-                                   [2, 2, 3, 3]])
+            assert res == [[1, 2, 1, 3],
+                           [1, 4, 2, 6],
+                           [2, 2, 3, 3]]
 
             # Range query with PER PARTITION LIMIT
             res = rows_to_list(session.execute("SELECT a, b, e, count(b), max(e) FROM test GROUP BY a, b PER PARTITION LIMIT 2"))
-            self.assertEqual(res, [[1, 2, 6, 2L, 12],
-                                   [1, 4, 12, 2L, 24],
-                                   [2, 2, 6, 1L, 6],
-                                   [2, 4, 12, 1L, 12],
-                                   [4, 8, 24, 1L, 24]])
+            assert res == [[1, 2, 6, 2, 12],
+                           [1, 4, 12, 2, 24],
+                           [2, 2, 6, 1, 6],
+                           [2, 4, 12, 1, 12],
+                           [4, 8, 24, 1, 24]]
 
             res = rows_to_list(session.execute("SELECT a, b, e, count(b), max(e) FROM test GROUP BY a, b PER PARTITION LIMIT 1"))
-            self.assertEqual(res, [[1, 2, 6, 2L, 12],
-                                   [2, 2, 6, 1L, 6],
-                                   [4, 8, 24, 1L, 24]])
+            assert res == [[1, 2, 6, 2, 12],
+                           [2, 2, 6, 1, 6],
+                           [4, 8, 24, 1, 24]]
 
             # Range queries with PER PARTITION LIMIT and LIMIT
             res = rows_to_list(session.execute("SELECT a, b, e, count(b), max(e) FROM test GROUP BY a, b PER PARTITION LIMIT 2 LIMIT 3"))
-            self.assertEqual(res, [[1, 2, 6, 2L, 12],
-                                   [1, 4, 12, 2L, 24],
-                                   [2, 2, 6, 1L, 6]])
+            assert res == [[1, 2, 6, 2, 12],
+                           [1, 4, 12, 2, 24],
+                           [2, 2, 6, 1, 6]]
 
             res = rows_to_list(session.execute("SELECT a, b, e, count(b), max(e) FROM test GROUP BY a, b PER PARTITION LIMIT 2 LIMIT 5"))
-            self.assertEqual(res, [[1, 2, 6, 2L, 12],
-                                   [1, 4, 12, 2L, 24],
-                                   [2, 2, 6, 1L, 6],
-                                   [2, 4, 12, 1L, 12],
-                                   [4, 8, 24, 1L, 24]])
+            assert res == [[1, 2, 6, 2, 12],
+                           [1, 4, 12, 2, 24],
+                           [2, 2, 6, 1, 6],
+                           [2, 4, 12, 1, 12],
+                           [4, 8, 24, 1, 24]]
 
             res = rows_to_list(session.execute("SELECT a, b, e, count(b), max(e) FROM test GROUP BY a, b PER PARTITION LIMIT 2 LIMIT 10"))
-            self.assertEqual(res, [[1, 2, 6, 2L, 12],
-                                   [1, 4, 12, 2L, 24],
-                                   [2, 2, 6, 1L, 6],
-                                   [2, 4, 12, 1L, 12],
-                                   [4, 8, 24, 1L, 24]])
+            assert res == [[1, 2, 6, 2, 12],
+                           [1, 4, 12, 2, 24],
+                           [2, 2, 6, 1, 6],
+                           [2, 4, 12, 1, 12],
+                           [4, 8, 24, 1, 24]]
 
             # Range queries without aggregates and with PER PARTITION LIMIT
             res = rows_to_list(session.execute("SELECT a, b, c, d FROM test GROUP BY a, b, c PER PARTITION LIMIT 2"))
-            self.assertEqual(res, [[1, 2, 1, 3],
-                                   [1, 2, 2, 6],
-                                   [2, 2, 3, 3],
-                                   [2, 4, 3, 6],
-                                   [4, 8, 2, 12]])
+            assert res == [[1, 2, 1, 3],
+                           [1, 2, 2, 6],
+                           [2, 2, 3, 3],
+                           [2, 4, 3, 6],
+                           [4, 8, 2, 12]]
 
             res = rows_to_list(session.execute("SELECT a, b, c, d FROM test GROUP BY a, b PER PARTITION LIMIT 1"))
-            self.assertEqual(res, [[1, 2, 1, 3],
-                                   [2, 2, 3, 3],
-                                   [4, 8, 2, 12]])
+            assert res == [[1, 2, 1, 3],
+                           [2, 2, 3, 3],
+                           [4, 8, 2, 12]]
 
             # Range query with DISTINCT
             res = rows_to_list(session.execute("SELECT DISTINCT a, count(a)FROM test GROUP BY a"))
-            self.assertEqual(res, [[1, 1L],
-                                   [2, 1L],
-                                   [4, 1L]])
+            assert res == [[1, 1],
+                           [2, 1],
+                           [4, 1]]
 
             res = rows_to_list(session.execute("SELECT DISTINCT a, count(a)FROM test"))
-            self.assertEqual(res, [[1, 3L]])
+            assert res == [[1, 3]]
 
             # Range query with DISTINCT and LIMIT
             res = rows_to_list(session.execute("SELECT DISTINCT a, count(a)FROM test GROUP BY a LIMIT 2"))
-            self.assertEqual(res, [[1, 1L],
-                                   [2, 1L]])
+            assert res == [[1, 1],
+                           [2, 1]]
 
             res = rows_to_list(session.execute("SELECT DISTINCT a, count(a)FROM test LIMIT 2"))
-            self.assertEqual(res, [[1, 3L]])
+            assert res == [[1, 3]]
 
             # Single partition queries
             res = rows_to_list(
                 session.execute("SELECT a, b, e, count(b), max(e) FROM test WHERE a = 1 GROUP BY a, b, c"))
-            self.assertEqual(res, [[1, 2, 6, 1L, 6],
-                                   [1, 2, 12, 1L, 12],
-                                   [1, 4, 12, 2L, 24]])
+            assert res == [[1, 2, 6, 1, 6],
+                           [1, 2, 12, 1, 12],
+                           [1, 4, 12, 2, 24]]
 
             res = rows_to_list(session.execute("SELECT a, b, e, count(b), max(e) FROM test WHERE a = 1"))
-            self.assertEqual(res, [[1, 2, 6, 4L, 24]])
+            assert res == [[1, 2, 6, 4, 24]]
 
             res = rows_to_list(
                 session.execute("SELECT a, b, e, count(b), max(e) FROM test WHERE a = 1 AND b = 2 GROUP BY a, b, c"))
-            self.assertEqual(res, [[1, 2, 6, 1L, 6],
-                                   [1, 2, 12, 1L, 12]])
+            assert res == [[1, 2, 6, 1, 6],
+                           [1, 2, 12, 1, 12]]
 
             res = rows_to_list(session.execute("SELECT a, b, e, count(b), max(e) FROM test WHERE a = 1 AND b = 2"))
-            self.assertEqual(res, [[1, 2, 6, 2L, 12]])
+            assert res == [[1, 2, 6, 2, 12]]
 
             # Single partition queries without aggregates
             res = rows_to_list(session.execute("SELECT a, b, c, d FROM test WHERE a = 1 GROUP BY a, b"))
-            self.assertEqual(res, [[1, 2, 1, 3],
-                                   [1, 4, 2, 6]])
+            assert res == [[1, 2, 1, 3],
+                           [1, 4, 2, 6]]
 
             res = rows_to_list(session.execute("SELECT a, b, c, d FROM test WHERE a = 1 GROUP BY a, b, c"))
-            self.assertEqual(res, [[1, 2, 1, 3],
-                                   [1, 2, 2, 6],
-                                   [1, 4, 2, 6]])
+            assert res == [[1, 2, 1, 3],
+                           [1, 2, 2, 6],
+                           [1, 4, 2, 6]]
 
             # Single partition query with DISTINCT
             res = rows_to_list(session.execute("SELECT DISTINCT a, count(a)FROM test WHERE a = 1 GROUP BY a"))
-            self.assertEqual(res, [[1, 1L]])
+            assert res == [[1, 1]]
 
             res = rows_to_list(session.execute("SELECT DISTINCT a, count(a)FROM test WHERE a = 1 GROUP BY a"))
-            self.assertEqual(res, [[1, 1L]])
+            assert res == [[1, 1]]
 
             # Single partition queries with LIMIT
             res = rows_to_list(
                 session.execute("SELECT a, b, e, count(b), max(e) FROM test WHERE a = 1 GROUP BY a, b, c LIMIT 10"))
-            self.assertEqual(res, [[1, 2, 6, 1L, 6],
-                                   [1, 2, 12, 1L, 12],
-                                   [1, 4, 12, 2L, 24]])
+            assert res == [[1, 2, 6, 1, 6],
+                           [1, 2, 12, 1, 12],
+                           [1, 4, 12, 2, 24]]
 
             res = rows_to_list(
                 session.execute("SELECT a, b, e, count(b), max(e) FROM test WHERE a = 1 GROUP BY a, b, c LIMIT 2"))
-            self.assertEqual(res, [[1, 2, 6, 1L, 6],
-                                   [1, 2, 12, 1L, 12]])
+            assert res == [[1, 2, 6, 1, 6],
+                           [1, 2, 12, 1, 12]]
 
             res = rows_to_list(session.execute("SELECT a, b, e, count(b), max(e) FROM test WHERE a = 1 LIMIT 2"))
-            self.assertEqual(res, [[1, 2, 6, 4L, 24]])
+            assert res == [[1, 2, 6, 4, 24]]
 
             res = rows_to_list(
                 session.execute("SELECT count(b), max(e) FROM test WHERE a = 1 GROUP BY a, b, c LIMIT 1"))
-            self.assertEqual(res, [[1L, 6]])
+            assert res == [[1, 6]]
 
             # Single partition queries with PER PARTITION LIMIT
             res = rows_to_list(
                 session.execute("SELECT a, b, e, count(b), max(e) FROM test WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 2"))
-            self.assertEqual(res, [[1, 2, 6, 1L, 6],
-                                   [1, 2, 12, 1L, 12]])
+            assert res == [[1, 2, 6, 1, 6],
+                           [1, 2, 12, 1, 12]]
 
             res = rows_to_list(
                 session.execute("SELECT a, b, e, count(b), max(e) FROM test WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 3"))
-            self.assertEqual(res, [[1, 2, 6, 1L, 6],
-                                   [1, 2, 12, 1L, 12],
-                                   [1, 4, 12, 2L, 24]])
+            assert res == [[1, 2, 6, 1, 6],
+                           [1, 2, 12, 1, 12],
+                           [1, 4, 12, 2, 24]]
 
             res = rows_to_list(
                 session.execute("SELECT a, b, e, count(b), max(e) FROM test WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 3"))
-            self.assertEqual(res, [[1, 2, 6, 1L, 6],
-                                   [1, 2, 12, 1L, 12],
-                                   [1, 4, 12, 2L, 24]])
+            assert res == [[1, 2, 6, 1, 6],
+                           [1, 2, 12, 1, 12],
+                           [1, 4, 12, 2, 24]]
 
             # Single partition queries without aggregates and with LIMIT
             res = rows_to_list(session.execute("SELECT a, b, c, d FROM test WHERE a = 1 GROUP BY a, b LIMIT 2"))
-            self.assertEqual(res, [[1, 2, 1, 3],
-                                   [1, 4, 2, 6]])
+            assert res == [[1, 2, 1, 3],
+                           [1, 4, 2, 6]]
 
             res = rows_to_list(session.execute("SELECT a, b, c, d FROM test WHERE a = 1 GROUP BY a, b LIMIT 1"))
-            self.assertEqual(res, [[1, 2, 1, 3]])
+            assert res == [[1, 2, 1, 3]]
 
             res = rows_to_list(session.execute("SELECT a, b, c, d FROM test WHERE a = 1 GROUP BY a, b, c LIMIT 2"))
-            self.assertEqual(res, [[1, 2, 1, 3],
-                                   [1, 2, 2, 6]])
+            assert res == [[1, 2, 1, 3],
+                           [1, 2, 2, 6]]
 
             # Single partition queries with ORDER BY
             res = rows_to_list(session.execute(
                 "SELECT a, b, e, count(b), max(e) FROM test WHERE a = 1 GROUP BY a, b, c ORDER BY b DESC, c DESC"))
-            self.assertEqual(res, [[1, 4, 24, 2L, 24],
-                                   [1, 2, 12, 1L, 12],
-                                   [1, 2, 6, 1L, 6]])
+            assert res == [[1, 4, 24, 2, 24],
+                           [1, 2, 12, 1, 12],
+                           [1, 2, 6, 1, 6]]
 
             res = rows_to_list(
                 session.execute("SELECT a, b, e, count(b), max(e) FROM test WHERE a = 1 ORDER BY b DESC, c DESC"))
-            self.assertEqual(res, [[1, 4, 24, 4L, 24]])
+            assert res == [[1, 4, 24, 4, 24]]
 
             # Single partition queries with ORDER BY and LIMIT
             res = rows_to_list(session.execute(
                 "SELECT a, b, e, count(b), max(e) FROM test WHERE a = 1 GROUP BY a, b, c ORDER BY b DESC, c DESC LIMIT 2"))
-            self.assertEqual(res, [[1, 4, 24, 2L, 24],
-                                   [1, 2, 12, 1L, 12]])
+            assert res == [[1, 4, 24, 2, 24],
+                           [1, 2, 12, 1, 12]]
 
             res = rows_to_list(session.execute(
                 "SELECT a, b, e, count(b), max(e) FROM test WHERE a = 1 ORDER BY b DESC, c DESC LIMIT 2"))
-            self.assertEqual(res, [[1, 4, 24, 4L, 24]])
+            assert res == [[1, 4, 24, 4, 24]]
 
             # Multi-partitions queries
             res = rows_to_list(
                 session.execute("SELECT a, b, e, count(b), max(e) FROM test WHERE a IN (1, 2, 4) GROUP BY a, b, c"))
-            self.assertEqual(res, [[1, 2, 6, 1L, 6],
-                                   [1, 2, 12, 1L, 12],
-                                   [1, 4, 12, 2L, 24],
-                                   [2, 2, 6, 1L, 6],
-                                   [2, 4, 12, 1L, 12],
-                                   [4, 8, 24, 1L, 24]])
+            assert res == [[1, 2, 6, 1, 6],
+                           [1, 2, 12, 1, 12],
+                           [1, 4, 12, 2, 24],
+                           [2, 2, 6, 1, 6],
+                           [2, 4, 12, 1, 12],
+                           [4, 8, 24, 1, 24]]
 
             res = rows_to_list(session.execute("SELECT a, b, e, count(b), max(e) FROM test WHERE a IN (1, 2, 4)"))
-            self.assertEqual(res, [[1, 2, 6, 7L, 24]])
+            assert res == [[1, 2, 6, 7, 24]]
 
             res = rows_to_list(session.execute(
                 "SELECT a, b, e, count(b), max(e) FROM test WHERE a IN (1, 2, 4) AND b = 2 GROUP BY a, b, c"))
-            self.assertEqual(res, [[1, 2, 6, 1L, 6],
-                                   [1, 2, 12, 1L, 12],
-                                   [2, 2, 6, 1L, 6]])
+            assert res == [[1, 2, 6, 1, 6],
+                           [1, 2, 12, 1, 12],
+                           [2, 2, 6, 1, 6]]
 
             res = rows_to_list(
                 session.execute("SELECT a, b, e, count(b), max(e) FROM test WHERE a IN (1, 2, 4) AND b = 2"))
-            self.assertEqual(res, [[1, 2, 6, 3L, 12]])
+            assert res == [[1, 2, 6, 3, 12]]
 
             # Multi-partitions queries without aggregates
             res = rows_to_list(session.execute("SELECT a, b, c, d FROM test WHERE a IN (1, 2, 4) GROUP BY a, b"))
-            self.assertEqual(res, [[1, 2, 1, 3],
-                                   [1, 4, 2, 6],
-                                   [2, 2, 3, 3],
-                                   [2, 4, 3, 6],
-                                   [4, 8, 2, 12]])
+            assert res == [[1, 2, 1, 3],
+                           [1, 4, 2, 6],
+                           [2, 2, 3, 3],
+                           [2, 4, 3, 6],
+                           [4, 8, 2, 12]]
 
             res = rows_to_list(session.execute("SELECT a, b, c, d FROM test WHERE a IN (1, 2, 4) GROUP BY a, b, c"))
-            self.assertEqual(res, [[1, 2, 1, 3],
-                                   [1, 2, 2, 6],
-                                   [1, 4, 2, 6],
-                                   [2, 2, 3, 3],
-                                   [2, 4, 3, 6],
-                                   [4, 8, 2, 12]])
+            assert res == [[1, 2, 1, 3],
+                           [1, 2, 2, 6],
+                           [1, 4, 2, 6],
+                           [2, 2, 3, 3],
+                           [2, 4, 3, 6],
+                           [4, 8, 2, 12]]
 
             # Multi-partitions queries with DISTINCT
             res = rows_to_list(session.execute("SELECT DISTINCT a, count(a)FROM test WHERE a IN (1, 2, 4) GROUP BY a"))
-            self.assertEqual(res, [[1, 1L],
-                                   [2, 1L],
-                                   [4, 1L]])
+            assert res == [[1, 1],
+                           [2, 1],
+                           [4, 1]]
 
             res = rows_to_list(session.execute("SELECT DISTINCT a, count(a)FROM test WHERE a IN (1, 2, 4)"))
-            self.assertEqual(res, [[1, 3L]])
+            assert res == [[1, 3]]
 
             # Multi-partitions query with DISTINCT and LIMIT
             res = rows_to_list(
                 session.execute("SELECT DISTINCT a, count(a)FROM test WHERE a IN (1, 2, 4) GROUP BY a LIMIT 2"))
-            self.assertEqual(res, [[1, 1L],
-                                   [2, 1L]])
+            assert res == [[1, 1],
+                           [2, 1]]
 
             res = rows_to_list(session.execute("SELECT DISTINCT a, count(a)FROM test WHERE a IN (1, 2, 4) LIMIT 2"))
-            self.assertEqual(res, [[1, 3L]])
+            assert res == [[1, 3]]
 
             # Multi-partitions queries without aggregates and with PER PARTITION LIMIT
             res = rows_to_list(session.execute("SELECT a, b, c, d FROM test WHERE a IN (1, 2, 4) GROUP BY a, b PER PARTITION LIMIT 1"))
-            self.assertEqual(res, [[1, 2, 1, 3],
-                                   [2, 2, 3, 3],
-                                   [4, 8, 2, 12]])
+            assert res == [[1, 2, 1, 3],
+                           [2, 2, 3, 3],
+                           [4, 8, 2, 12]]
 
             res = rows_to_list(session.execute("SELECT a, b, c, d FROM test WHERE a IN (1, 2, 4) GROUP BY a, b PER PARTITION LIMIT 2"))
-            self.assertEqual(res, [[1, 2, 1, 3],
-                                   [1, 4, 2, 6],
-                                   [2, 2, 3, 3],
-                                   [2, 4, 3, 6],
-                                   [4, 8, 2, 12]])
+            assert res == [[1, 2, 1, 3],
+                           [1, 4, 2, 6],
+                           [2, 2, 3, 3],
+                           [2, 4, 3, 6],
+                           [4, 8, 2, 12]]
 
             res = rows_to_list(session.execute("SELECT a, b, c, d FROM test WHERE a IN (1, 2, 4) GROUP BY a, b PER PARTITION LIMIT 3"))
-            self.assertEqual(res, [[1, 2, 1, 3],
-                                   [1, 4, 2, 6],
-                                   [2, 2, 3, 3],
-                                   [2, 4, 3, 6],
-                                   [4, 8, 2, 12]])
+            assert res == [[1, 2, 1, 3],
+                           [1, 4, 2, 6],
+                           [2, 2, 3, 3],
+                           [2, 4, 3, 6],
+                           [4, 8, 2, 12]]
 
             # Multi-partitions queries without aggregates, with PER PARTITION LIMIT and with LIMIT
             res = rows_to_list(session.execute("SELECT a, b, c, d FROM test WHERE a IN (1, 2, 4) GROUP BY a, b PER PARTITION LIMIT 1 LIMIT 2"))
-            self.assertEqual(res, [[1, 2, 1, 3],
-                                   [2, 2, 3, 3]])
+            assert res == [[1, 2, 1, 3],
+                           [2, 2, 3, 3]]
 
             res = rows_to_list(session.execute("SELECT a, b, c, d FROM test WHERE a IN (1, 2, 4) GROUP BY a, b PER PARTITION LIMIT 3 LIMIT 2"))
-            self.assertEqual(res, [[1, 2, 1, 3],
-                                   [1, 4, 2, 6]])
+            assert res == [[1, 2, 1, 3],
+                           [1, 4, 2, 6]]
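
The GROUP BY assertions above are only meaningful if paging is transparent to
aggregation, i.e. the fetch size must never change the result set. A minimal
check of that invariant, assuming a session prepared with tuple_factory as in
this test (check_group_by_paging_transparent is a hypothetical helper, not
part of the patch):

    from cassandra.query import SimpleStatement

    def check_group_by_paging_transparent(session, query, page_sizes=(2, 3, 5, 100)):
        # Every paged run must reassemble to exactly the rows returned by a
        # run whose fetch size exceeds the full result set.
        baseline = list(session.execute(SimpleStatement(query, fetch_size=10000)))
        for size in page_sizes:
            paged = list(session.execute(SimpleStatement(query, fetch_size=size)))
            assert paged == baseline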
 
     @since('3.10')
-    def group_by_with_range_name_query_paging_test(self):
+    def test_group_by_with_range_name_query_paging(self):
         """
         @jira_ticket CASSANDRA-10707
         """
-
         session = self.prepare(row_factory=tuple_factory)
         create_ks(session, 'group_by_with_range_name_query_paging_test', 2)
         session.execute("CREATE TABLE test (a int, b int, c int, d int, primary key (a, b, c))")
 
-        for i in xrange(1, 5):
-            for j in xrange(1, 5):
-                for k in xrange(1, 5):
+        for i in range(1, 5):
+            for j in range(1, 5):
+                for k in range(1, 5):
                     session.execute("INSERT INTO test (a, b, c, d) VALUES ({}, {}, {}, {})".format(i, j, k, i + j))
 
         # Makes sure that we have some tombstones
@@ -885,62 +886,62 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
 
             # Range queries
             res = rows_to_list(session.execute("SELECT a, b, d, count(b), max(d) FROM test WHERE b = 1 and c IN (1, 2) GROUP BY a ALLOW FILTERING"))
-            self.assertEqual(res, [[1, 1, 2, 2L, 2],
-                                   [2, 1, 3, 2L, 3],
-                                   [4, 1, 5, 2L, 5]])
+            assert res == [[1, 1, 2, 2, 2],
+                           [2, 1, 3, 2, 3],
+                           [4, 1, 5, 2, 5]]
 
             res = rows_to_list(session.execute("SELECT a, b, d, count(b), max(d) FROM test WHERE b = 1 and c IN (1, 2) GROUP BY a, b ALLOW FILTERING"))
-            self.assertEqual(res, [[1, 1, 2, 2L, 2],
-                                   [2, 1, 3, 2L, 3],
-                                   [4, 1, 5, 2L, 5]])
+            assert res == [[1, 1, 2, 2, 2],
+                           [2, 1, 3, 2, 3],
+                           [4, 1, 5, 2, 5]]
 
             res = rows_to_list(session.execute("SELECT a, b, d, count(b), max(d) FROM test WHERE b IN (1, 2) and c IN (1, 2) GROUP BY a, b ALLOW FILTERING"))
-            self.assertEqual(res, [[1, 1, 2, 2L, 2],
-                                   [1, 2, 3, 2L, 3],
-                                   [2, 1, 3, 2L, 3],
-                                   [2, 2, 4, 2L, 4],
-                                   [4, 1, 5, 2L, 5],
-                                   [4, 2, 6, 2L, 6]])
+            assert res == [[1, 1, 2, 2, 2],
+                           [1, 2, 3, 2, 3],
+                           [2, 1, 3, 2, 3],
+                           [2, 2, 4, 2, 4],
+                           [4, 1, 5, 2, 5],
+                           [4, 2, 6, 2, 6]]
 
             # Range queries with LIMIT
             res = rows_to_list(session.execute("SELECT a, b, d, count(b), max(d) FROM test WHERE b = 1 and c IN (1, 2) GROUP BY a LIMIT 5 ALLOW FILTERING"))
-            self.assertEqual(res, [[1, 1, 2, 2L, 2],
-                                   [2, 1, 3, 2L, 3],
-                                   [4, 1, 5, 2L, 5]])
+            assert res == [[1, 1, 2, 2, 2],
+                           [2, 1, 3, 2, 3],
+                           [4, 1, 5, 2, 5]]
 
             res = rows_to_list(session.execute("SELECT a, b, d, count(b), max(d) FROM test WHERE b = 1 and c IN (1, 2) GROUP BY a, b LIMIT 3 ALLOW FILTERING"))
-            self.assertEqual(res, [[1, 1, 2, 2L, 2],
-                                   [2, 1, 3, 2L, 3],
-                                   [4, 1, 5, 2L, 5]])
+            assert res == [[1, 1, 2, 2, 2],
+                           [2, 1, 3, 2, 3],
+                           [4, 1, 5, 2, 5]]
 
             res = rows_to_list(session.execute("SELECT a, b, d, count(b), max(d) FROM test WHERE b IN (1, 2) and c IN (1, 2) GROUP BY a, b LIMIT 3 ALLOW FILTERING"))
-            self.assertEqual(res, [[1, 1, 2, 2L, 2],
-                                   [1, 2, 3, 2L, 3],
-                                   [2, 1, 3, 2L, 3]])
+            assert res == [[1, 1, 2, 2, 2],
+                           [1, 2, 3, 2, 3],
+                           [2, 1, 3, 2, 3]]
 
             # Range queries with PER PARTITION LIMIT
             res = rows_to_list(session.execute("SELECT a, b, d, count(b), max(d) FROM test WHERE b = 1 and c IN (1, 2) GROUP BY a, b PER PARTITION LIMIT 2 ALLOW FILTERING"))
-            self.assertEqual(res, [[1, 1, 2, 2L, 2],
-                                   [2, 1, 3, 2L, 3],
-                                   [4, 1, 5, 2L, 5]])
+            assert res == [[1, 1, 2, 2, 2],
+                           [2, 1, 3, 2, 3],
+                           [4, 1, 5, 2, 5]]
 
             res = rows_to_list(session.execute("SELECT a, b, d, count(b), max(d) FROM test WHERE b IN (1, 2) and c IN (1, 2) GROUP BY a, b PER PARTITION LIMIT 1 ALLOW FILTERING"))
-            self.assertEqual(res, [[1, 1, 2, 2L, 2],
-                                   [2, 1, 3, 2L, 3],
-                                   [4, 1, 5, 2L, 5]])
+            assert res == [[1, 1, 2, 2, 2],
+                           [2, 1, 3, 2, 3],
+                           [4, 1, 5, 2, 5]]
 
             # Range queries with PER PARTITION LIMIT and LIMIT
             res = rows_to_list(session.execute("SELECT a, b, d, count(b), max(d) FROM test WHERE b = 1 and c IN (1, 2) GROUP BY a, b PER PARTITION LIMIT 2 LIMIT 5 ALLOW FILTERING"))
-            self.assertEqual(res, [[1, 1, 2, 2L, 2],
-                                   [2, 1, 3, 2L, 3],
-                                   [4, 1, 5, 2L, 5]])
+            assert res == [[1, 1, 2, 2, 2],
+                           [2, 1, 3, 2, 3],
+                           [4, 1, 5, 2, 5]]
 
             res = rows_to_list(session.execute("SELECT a, b, d, count(b), max(d) FROM test WHERE b IN (1, 2) and c IN (1, 2) GROUP BY a, b PER PARTITION LIMIT 1 LIMIT 2 ALLOW FILTERING"))
-            self.assertEqual(res, [[1, 1, 2, 2L, 2],
-                                   [2, 1, 3, 2L, 3]])
+            assert res == [[1, 1, 2, 2, 2],
+                           [2, 1, 3, 2, 3]]
 
     @since('3.10')
-    def group_by_with_static_columns_paging_test(self):
+    def test_group_by_with_static_columns_paging(self):
         """
         @jira_ticket CASSANDRA-10707
         """
@@ -961,148 +962,148 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
 
             # Range queries
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test GROUP BY a"))
-            self.assertEqual(res, [[1, None, 1, 0L, 1L],
-                                   [2, None, 2, 0L, 1L],
-                                   [4, None, 3, 0L, 1L]])
+            assert res == [[1, None, 1, 0, 1],
+                           [2, None, 2, 0, 1],
+                           [4, None, 3, 0, 1]]
 
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test GROUP BY a, b"))
-            self.assertEqual(res, [[1, None, 1, 0L, 1L],
-                                   [2, None, 2, 0L, 1L],
-                                   [4, None, 3, 0L, 1L]])
+            assert res == [[1, None, 1, 0, 1],
+                           [2, None, 2, 0, 1],
+                           [4, None, 3, 0, 1]]
 
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test"))
-            self.assertEqual(res, [[1, None, 1, 0L, 3L]])
+            assert res == [[1, None, 1, 0, 3]]
 
             # Range query without aggregates
             res = rows_to_list(session.execute("SELECT a, b, s FROM test GROUP BY a, b"))
-            self.assertEqual(res, [[1, None, 1],
-                                   [2, None, 2],
-                                   [4, None, 3]])
+            assert res == [[1, None, 1],
+                           [2, None, 2],
+                           [4, None, 3]]
 
             # Range queries with LIMIT
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test GROUP BY a, b LIMIT 2"))
-            self.assertEqual(res, [[1, None, 1, 0L, 1L],
-                                   [2, None, 2, 0L, 1L]])
+            assert res == [[1, None, 1, 0, 1],
+                           [2, None, 2, 0, 1]]
 
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test LIMIT 2"))
-            self.assertEqual(res, [[1, None, 1, 0L, 3L]])
+            assert res == [[1, None, 1, 0, 3]]
 
             # Range query with PER PARTITION LIMIT
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test GROUP BY a, b PER PARTITION LIMIT 2"))
-            self.assertEqual(res, [[1, None, 1, 0L, 1L],
-                                   [2, None, 2, 0L, 1L],
-                                   [4, None, 3, 0L, 1L]])
+            assert res == [[1, None, 1, 0, 1],
+                           [2, None, 2, 0, 1],
+                           [4, None, 3, 0, 1]]
 
             # Range queries with DISTINCT
             res = rows_to_list(session.execute("SELECT DISTINCT a, s, count(s) FROM test GROUP BY a"))
-            self.assertEqual(res, [[1, 1, 1L],
-                                   [2, 2, 1L],
-                                   [4, 3, 1L]])
+            assert res == [[1, 1, 1],
+                           [2, 2, 1],
+                           [4, 3, 1]]
 
             res = rows_to_list(session.execute("SELECT DISTINCT a, s, count(s) FROM test "))
-            self.assertEqual(res, [[1, 1, 3L]])
+            assert res == [[1, 1, 3]]
 
             # Range queries with DISTINCT and LIMIT
             res = rows_to_list(session.execute("SELECT DISTINCT a, s, count(s) FROM test GROUP BY a LIMIT 2"))
-            self.assertEqual(res, [[1, 1, 1L],
-                                   [2, 2, 1L]])
+            assert res == [[1, 1, 1],
+                           [2, 2, 1]]
 
             res = rows_to_list(session.execute("SELECT DISTINCT a, s, count(s) FROM test LIMIT 2"))
-            self.assertEqual(res, [[1, 1, 3L]])
+            assert res == [[1, 1, 3]]
 
             # Single partition queries
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a = 1 GROUP BY a"))
-            self.assertEqual(res, [[1, None, 1, 0L, 1L]])
+            assert res == [[1, None, 1, 0, 1]]
 
             res = rows_to_list(
                 session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a = 1 GROUP BY a, b"))
-            self.assertEqual(res, [[1, None, 1, 0L, 1L]])
+            assert res == [[1, None, 1, 0, 1]]
 
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a = 1"))
-            self.assertEqual(res, [[1, None, 1, 0L, 1L]])
+            assert res == [[1, None, 1, 0, 1]]
 
             # Single partition query without aggregates
             res = rows_to_list(session.execute("SELECT a, b, s FROM test WHERE a = 1 GROUP BY a, b"))
-            self.assertEqual(res, [[1, None, 1]])
+            assert res == [[1, None, 1]]
 
             # Single partition queries with LIMIT
             res = rows_to_list(
                 session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a = 1 GROUP BY a, b LIMIT 2"))
-            self.assertEqual(res, [[1, None, 1, 0L, 1L]])
+            assert res == [[1, None, 1, 0, 1]]
 
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a = 1 LIMIT 2"))
-            self.assertEqual(res, [[1, None, 1, 0L, 1L]])
+            assert res == [[1, None, 1, 0, 1]]
 
             # Single partition queries with PER PARTITION LIMIT
             res = rows_to_list(
                 session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a = 1 GROUP BY a, b PER PARTITION LIMIT 2"))
-            self.assertEqual(res, [[1, None, 1, 0L, 1L]])
+            assert res == [[1, None, 1, 0, 1]]
 
             # Single partition queries with DISTINCT
             res = rows_to_list(session.execute("SELECT DISTINCT a, s, count(s) FROM test WHERE a = 1 GROUP BY a"))
-            self.assertEqual(res, [[1, 1, 1L]])
+            assert res == [[1, 1, 1]]
 
             res = rows_to_list(session.execute("SELECT DISTINCT a, s, count(s) FROM test WHERE a = 1"))
-            self.assertEqual(res, [[1, 1, 1L]])
+            assert res == [[1, 1, 1]]
 
             # Multi-partitions queries
             res = rows_to_list(
                 session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a IN (1, 2, 3, 4) GROUP BY a"))
-            self.assertEqual(res, [[1, None, 1, 0L, 1L],
-                                   [2, None, 2, 0L, 1L],
-                                   [4, None, 3, 0L, 1L]])
+            assert res == [[1, None, 1, 0, 1],
+                           [2, None, 2, 0, 1],
+                           [4, None, 3, 0, 1]]
 
             res = rows_to_list(
                 session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a IN (1, 2, 3, 4) GROUP BY a, b"))
-            self.assertEqual(res, [[1, None, 1, 0L, 1L],
-                                   [2, None, 2, 0L, 1L],
-                                   [4, None, 3, 0L, 1L]])
+            assert res == [[1, None, 1, 0, 1],
+                           [2, None, 2, 0, 1],
+                           [4, None, 3, 0, 1]]
 
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a IN (1, 2, 3, 4)"))
-            self.assertEqual(res, [[1, None, 1, 0L, 3L]])
+            assert res == [[1, None, 1, 0, 3]]
 
             # Multi-partitions query without aggregates
             res = rows_to_list(session.execute("SELECT a, b, s FROM test WHERE a IN (1, 2, 3, 4) GROUP BY a, b"))
-            self.assertEqual(res, [[1, None, 1],
-                                   [2, None, 2],
-                                   [4, None, 3]])
+            assert res == [[1, None, 1],
+                           [2, None, 2],
+                           [4, None, 3]]
 
             # Multi-partitions query with LIMIT
             res = rows_to_list(session.execute(
                 "SELECT a, b, s, count(b), count(s) FROM test WHERE a IN (1, 2, 3, 4) GROUP BY a, b LIMIT 2"))
-            self.assertEqual(res, [[1, None, 1, 0L, 1L],
-                                   [2, None, 2, 0L, 1L]])
+            assert res == [[1, None, 1, 0, 1],
+                           [2, None, 2, 0, 1]]
 
             res = rows_to_list(
                 session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a IN (1, 2, 3, 4) LIMIT 2"))
-            self.assertEqual(res, [[1, None, 1, 0L, 3L]])
+            assert res == [[1, None, 1, 0, 3]]
 
             # Multi-partitions query with PER PARTITION LIMIT
             res = rows_to_list(session.execute(
                 "SELECT a, b, s, count(b), count(s) FROM test WHERE a IN (1, 2, 3, 4) GROUP BY a, b PER PARTITION LIMIT 1"))
-            self.assertEqual(res, [[1, None, 1, 0L, 1L],
-                                   [2, None, 2, 0L, 1L],
-                                   [4, None, 3, 0L, 1L]])
+            assert res == [[1, None, 1, 0, 1],
+                           [2, None, 2, 0, 1],
+                           [4, None, 3, 0, 1]]
 
             # Multi-partitions queries with DISTINCT
             res = rows_to_list(
                 session.execute("SELECT DISTINCT a, s, count(s) FROM test WHERE a IN (1, 2, 3, 4) GROUP BY a"))
-            self.assertEqual(res, [[1, 1, 1L],
-                                   [2, 2, 1L],
-                                   [4, 3, 1L]])
+            assert res == [[1, 1, 1],
+                           [2, 2, 1],
+                           [4, 3, 1]]
 
             res = rows_to_list(session.execute("SELECT DISTINCT a, s, count(s) FROM test WHERE a IN (1, 2, 3, 4)"))
-            self.assertEqual(res, [[1, 1, 3L]])
+            assert res == [[1, 1, 3]]
 
             # Multi-partitions queries with DISTINCT and LIMIT
             res = rows_to_list(
                 session.execute("SELECT DISTINCT a, s, count(s) FROM test WHERE a IN (1, 2, 3, 4) GROUP BY a LIMIT 2"))
-            self.assertEqual(res, [[1, 1, 1L],
-                                   [2, 2, 1L]])
+            assert res == [[1, 1, 1],
+                           [2, 2, 1]]
 
             res = rows_to_list(
                 session.execute("SELECT DISTINCT a, s, count(s) FROM test WHERE a IN (1, 2, 3, 4) LIMIT 2"))
-            self.assertEqual(res, [[1, 1, 3L]])
+            assert res == [[1, 1, 3]]
 
         # ------------------------------------
         # Test with non static columns not empty
@@ -1129,331 +1130,330 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
 
             # Range queries
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test GROUP BY a"))
-            self.assertEqual(res, [[1, 2, 1, 4L, 4L],
-                                   [2, 2, 2, 2L, 2L],
-                                   [4, 8, None, 1L, 0L],
-                                   [3, None, 3, 0L, 1L]])
+            assert res == [[1, 2, 1, 4, 4],
+                           [2, 2, 2, 2, 2],
+                           [4, 8, None, 1, 0],
+                           [3, None, 3, 0, 1]]
 
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test GROUP BY a, b"))
-            self.assertEqual(res, [[1, 2, 1, 2L, 2L],
-                                   [1, 4, 1, 2L, 2L],
-                                   [2, 2, 2, 1L, 1L],
-                                   [2, 4, 2, 1L, 1L],
-                                   [4, 8, None, 1L, 0L],
-                                   [3, None, 3, 0L, 1L]])
+            assert res == [[1, 2, 1, 2, 2],
+                           [1, 4, 1, 2, 2],
+                           [2, 2, 2, 1, 1],
+                           [2, 4, 2, 1, 1],
+                           [4, 8, None, 1, 0],
+                           [3, None, 3, 0, 1]]
 
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test"))
-            self.assertEqual(res, [[1, 2, 1, 7L, 7L]])
+            assert res == [[1, 2, 1, 7, 7]]
 
             res = rows_to_list(
                 session.execute(
                     "SELECT a, b, s, count(b), count(s) FROM test WHERE b = 2 GROUP BY a, b ALLOW FILTERING"))
-            self.assertEqual(res, [[1, 2, 1, 2L, 2L],
-                                   [2, 2, 2, 1L, 1L]])
+            assert res == [[1, 2, 1, 2, 2],
+                           [2, 2, 2, 1, 1]]
 
             assert_invalid(session, "SELECT a, b, s, count(b), count(s) FROM test WHERE b = 2 GROUP BY a, b", expected=InvalidRequest)
 
             res = rows_to_list(
                 session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE b = 2 ALLOW FILTERING"))
-            self.assertEqual(res, [[1, 2, 1, 3L, 3L]])
+            assert res == [[1, 2, 1, 3, 3]]
 
             assert_invalid(session, "SELECT a, b, s, count(b), count(s) FROM test WHERE b = 2", expected=InvalidRequest)
 
             # Range queries without aggregates
             res = rows_to_list(session.execute("SELECT a, b, s FROM test GROUP BY a"))
-            self.assertEqual(res, [[1, 2, 1],
-                                   [2, 2, 2],
-                                   [4, 8, None],
-                                   [3, None, 3]])
+            assert res == [[1, 2, 1],
+                           [2, 2, 2],
+                           [4, 8, None],
+                           [3, None, 3]]
 
             res = rows_to_list(session.execute("SELECT a, b, s FROM test GROUP BY a, b"))
-            self.assertEqual(res, [[1, 2, 1],
-                                   [1, 4, 1],
-                                   [2, 2, 2],
-                                   [2, 4, 2],
-                                   [4, 8, None],
-                                   [3, None, 3]])
+            assert res == [[1, 2, 1],
+                           [1, 4, 1],
+                           [2, 2, 2],
+                           [2, 4, 2],
+                           [4, 8, None],
+                           [3, None, 3]]
 
             # Range query with LIMIT
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test GROUP BY a LIMIT 2"))
-            self.assertEqual(res, [[1, 2, 1, 4L, 4L],
-                                   [2, 2, 2, 2L, 2L]])
+            assert res == [[1, 2, 1, 4, 4],
+                           [2, 2, 2, 2, 2]]
 
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test LIMIT 2"))
-            self.assertEqual(res, [[1, 2, 1, 7L, 7L]])
+            assert res == [[1, 2, 1, 7, 7]]
 
             # Range queries without aggregates and with LIMIT
             res = rows_to_list(session.execute("SELECT a, b, s FROM test GROUP BY a LIMIT 2"))
-            self.assertEqual(res, [[1, 2, 1],
-                                   [2, 2, 2]])
+            assert res == [[1, 2, 1],
+                           [2, 2, 2]]
 
             res = rows_to_list(session.execute("SELECT a, b, s FROM test GROUP BY a, b LIMIT 10"))
-            self.assertEqual(res, [[1, 2, 1],
-                                   [1, 4, 1],
-                                   [2, 2, 2],
-                                   [2, 4, 2],
-                                   [4, 8, None],
-                                   [3, None, 3]])
+            assert res == [[1, 2, 1],
+                           [1, 4, 1],
+                           [2, 2, 2],
+                           [2, 4, 2],
+                           [4, 8, None],
+                           [3, None, 3]]
 
             # Range queries with PER PARTITION LIMITS
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test GROUP BY a, b PER PARTITION LIMIT 2"))
-            self.assertEqual(res, [[1, 2, 1, 2L, 2L],
-                                   [1, 4, 1, 2L, 2L],
-                                   [2, 2, 2, 1L, 1L],
-                                   [2, 4, 2, 1L, 1L],
-                                   [4, 8, None, 1L, 0L],
-                                   [3, None, 3, 0L, 1L]])
+            assert res == [[1, 2, 1, 2, 2],
+                           [1, 4, 1, 2, 2],
+                           [2, 2, 2, 1, 1],
+                           [2, 4, 2, 1, 1],
+                           [4, 8, None, 1, 0],
+                           [3, None, 3, 0, 1]]
 
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test GROUP BY a, b PER PARTITION LIMIT 1"))
-            self.assertEqual(res, [[1, 2, 1, 2L, 2L],
-                                   [2, 2, 2, 1L, 1L],
-                                   [4, 8, None, 1L, 0L],
-                                   [3, None, 3, 0L, 1L]])
+            assert res == [[1, 2, 1, 2, 2],
+                           [2, 2, 2, 1, 1],
+                           [4, 8, None, 1, 0],
+                           [3, None, 3, 0, 1]]
 
             # Range queries with PER PARTITION LIMITS and LIMIT
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test GROUP BY a, b PER PARTITION LIMIT 1 LIMIT 5"))
-            self.assertEqual(res, [[1, 2, 1, 2L, 2L],
-                                   [2, 2, 2, 1L, 1L],
-                                   [4, 8, None, 1L, 0L],
-                                   [3, None, 3, 0L, 1L]])
+            assert res == [[1, 2, 1, 2, 2],
+                           [2, 2, 2, 1, 1],
+                           [4, 8, None, 1, 0],
+                           [3, None, 3, 0, 1]]
 
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test GROUP BY a, b PER PARTITION LIMIT 1 LIMIT 4"))
-            self.assertEqual(res, [[1, 2, 1, 2L, 2L],
-                                   [2, 2, 2, 1L, 1L],
-                                   [4, 8, None, 1L, 0L],
-                                   [3, None, 3, 0L, 1L]])
+            assert res == [[1, 2, 1, 2, 2],
+                           [2, 2, 2, 1, 1],
+                           [4, 8, None, 1, 0],
+                           [3, None, 3, 0, 1]]
 
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test GROUP BY a, b PER PARTITION LIMIT 1 LIMIT 2"))
-            self.assertEqual(res, [[1, 2, 1, 2L, 2L],
-                                   [2, 2, 2, 1L, 1L]])
+            assert res == [[1, 2, 1, 2, 2],
+                           [2, 2, 2, 1, 1]]
 
             # Range queries with DISTINCT
             res = rows_to_list(session.execute("SELECT DISTINCT a, s, count(a), count(s) FROM test GROUP BY a"))
-            self.assertEqual(res, [[1, 1, 1L, 1L],
-                                   [2, 2, 1L, 1L],
-                                   [4, None, 1L, 0L],
-                                   [3, 3, 1L, 1L]])
+            assert res == [[1, 1, 1, 1],
+                           [2, 2, 1, 1],
+                           [4, None, 1, 0],
+                           [3, 3, 1, 1]]
 
             res = rows_to_list(session.execute("SELECT DISTINCT a, s, count(a), count(s) FROM test"))
-            self.assertEqual(res, [[1, 1, 4L, 3L]])
+            assert res == [[1, 1, 4, 3]]
 
             # Range queries with DISTINCT and LIMIT
             res = rows_to_list(session.execute("SELECT DISTINCT a, s, count(a), count(s) FROM test GROUP BY a LIMIT 2"))
-            self.assertEqual(res, [[1, 1, 1L, 1L],
-                                   [2, 2, 1L, 1L]])
+            assert res == [[1, 1, 1, 1],
+                           [2, 2, 1, 1]]
 
             res = rows_to_list(session.execute("SELECT DISTINCT a, s, count(a), count(s) FROM test LIMIT 2"))
-            self.assertEqual(res, [[1, 1, 4L, 3L]])
+            assert res == [[1, 1, 4, 3]]
 
             # Single partition queries
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a = 1 GROUP BY a"))
-            self.assertEqual(res, [[1, 2, 1, 4L, 4L]])
+            assert res == [[1, 2, 1, 4, 4]]
 
             res = rows_to_list(
                 session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a = 3 GROUP BY a, b"))
-            self.assertEqual(res, [[3, None, 3, 0L, 1L]])
+            assert res == [[3, None, 3, 0, 1]]
 
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a = 3"))
-            self.assertEqual(res, [[3, None, 3, 0L, 1L]])
+            assert res == [[3, None, 3, 0, 1]]
 
             res = rows_to_list(
                 session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a = 2 AND b = 2 GROUP BY a, b"))
-            self.assertEqual(res, [[2, 2, 2, 1L, 1L]])
+            assert res == [[2, 2, 2, 1, 1]]
 
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a = 2 AND b = 2"))
-            self.assertEqual(res, [[2, 2, 2, 1L, 1L]])
+            assert res == [[2, 2, 2, 1, 1]]
 
             # Single partition queries without aggregates
             res = rows_to_list(session.execute("SELECT a, b, s FROM test WHERE a = 1 GROUP BY a"))
-            self.assertEqual(res, [[1, 2, 1]])
+            assert res == [[1, 2, 1]]
 
             res = rows_to_list(session.execute("SELECT a, b, s FROM test WHERE a = 4 GROUP BY a, b"))
-            self.assertEqual(res, [[4, 8, None]])
+            assert res == [[4, 8, None]]
 
             # Single partition queries with LIMIT
             res = rows_to_list(
                 session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a = 2 GROUP BY a, b LIMIT 1"))
-            self.assertEqual(res, [[2, 2, 2, 1L, 1L]])
+            assert res == [[2, 2, 2, 1, 1]]
 
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a = 2 LIMIT 1"))
-            self.assertEqual(res, [[2, 2, 2, 2L, 2L]])
+            assert res == [[2, 2, 2, 2, 2]]
 
             # Single partition queries without aggregates and with LIMIT
             res = rows_to_list(session.execute("SELECT a, b, s FROM test WHERE a = 2 GROUP BY a, b LIMIT 1"))
-            self.assertEqual(res, [[2, 2, 2]])
+            assert res == [[2, 2, 2]]
 
             res = rows_to_list(session.execute("SELECT a, b, s FROM test WHERE a = 2 GROUP BY a, b LIMIT 2"))
-            self.assertEqual(res, [[2, 2, 2],
-                                   [2, 4, 2]])
+            assert res == [[2, 2, 2],
+                           [2, 4, 2]]
 
             # Single partition queries with PER PARTITION LIMIT
             res = rows_to_list(
                 session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a = 2 GROUP BY a, b PER PARTITION LIMIT 1"))
-            self.assertEqual(res, [[2, 2, 2, 1L, 1L]])
+            assert res == [[2, 2, 2, 1, 1]]
 
             # Single partition queries with DISTINCT
             res = rows_to_list(
                 session.execute("SELECT DISTINCT a, s, count(a), count(s) FROM test WHERE a = 2 GROUP BY a"))
-            self.assertEqual(res, [[2, 2, 1L, 1L]])
+            assert res == [[2, 2, 1, 1]]
 
             # Single partition queries with ORDER BY
             res = rows_to_list(session.execute(
                 "SELECT a, b, s, count(b), count(s) FROM test WHERE a = 2 GROUP BY a, b ORDER BY b DESC, c DESC"))
-            self.assertEqual(res, [[2, 4, 2, 1L, 1L],
-                                   [2, 2, 2, 1L, 1L]])
+            assert res == [[2, 4, 2, 1, 1],
+                           [2, 2, 2, 1, 1]]
 
             res = rows_to_list(
                 session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a = 2 ORDER BY b DESC, c DESC"))
-            self.assertEqual(res, [[2, 4, 2, 2L, 2L]])
+            assert res == [[2, 4, 2, 2, 2]]
 
             # Single partition queries with ORDER BY and LIMIT
             res = rows_to_list(session.execute(
                 "SELECT a, b, s, count(b), count(s) FROM test WHERE a = 2 GROUP BY a, b ORDER BY b DESC, c DESC LIMIT 1"))
-            self.assertEqual(res, [[2, 4, 2, 1L, 1L]])
+            assert res == [[2, 4, 2, 1, 1]]
 
             res = rows_to_list(session.execute(
                 "SELECT a, b, s, count(b), count(s) FROM test WHERE a = 2 ORDER BY b DESC, c DESC LIMIT 2"))
-            self.assertEqual(res, [[2, 4, 2, 2L, 2L]])
+            assert res == [[2, 4, 2, 2, 2]]
 
             # Single partition queries with ORDER BY and PER PARTITION LIMIT
             res = rows_to_list(session.execute(
                 "SELECT a, b, s, count(b), count(s) FROM test WHERE a = 2 GROUP BY a, b ORDER BY b DESC, c DESC PER PARTITION LIMIT 1"))
-            self.assertEqual(res, [[2, 4, 2, 1L, 1L]])
+            assert res == [[2, 4, 2, 1, 1]]
 
             # Multi-partitions queries
             res = rows_to_list(
                 session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a IN (1, 2, 3, 4) GROUP BY a"))
-            self.assertEqual(res, [[1, 2, 1, 4L, 4L],
-                                   [2, 2, 2, 2L, 2L],
-                                   [3, None, 3, 0L, 1L],
-                                   [4, 8, None, 1L, 0L]])
+            assert res == [[1, 2, 1, 4, 4],
+                           [2, 2, 2, 2, 2],
+                           [3, None, 3, 0, 1],
+                           [4, 8, None, 1, 0]]
 
             res = rows_to_list(
                 session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a IN (1, 2, 3, 4) GROUP BY a, b"))
-            self.assertEqual(res, [[1, 2, 1, 2L, 2L],
-                                   [1, 4, 1, 2L, 2L],
-                                   [2, 2, 2, 1L, 1L],
-                                   [2, 4, 2, 1L, 1L],
-                                   [3, None, 3, 0L, 1L],
-                                   [4, 8, None, 1L, 0L]])
+            assert res == [[1, 2, 1, 2, 2],
+                           [1, 4, 1, 2, 2],
+                           [2, 2, 2, 1, 1],
+                           [2, 4, 2, 1, 1],
+                           [3, None, 3, 0, 1],
+                           [4, 8, None, 1, 0]]
 
             res = rows_to_list(session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a IN (1, 2, 3, 4)"))
-            self.assertEqual(res, [[1, 2, 1, 7L, 7L]])
+            assert res == [[1, 2, 1, 7, 7]]
 
             res = rows_to_list(session.execute(
                 "SELECT a, b, s, count(b), count(s) FROM test WHERE a IN (1, 2, 3, 4) AND b = 2 GROUP BY a, b"))
-            self.assertEqual(res, [[1, 2, 1, 2L, 2L],
-                                   [2, 2, 2, 1L, 1L]])
+            assert res == [[1, 2, 1, 2, 2],
+                           [2, 2, 2, 1, 1]]
 
             res = rows_to_list(
                 session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a IN (1, 2, 3, 4) AND b = 2"))
-            self.assertEqual(res, [[1, 2, 1, 3L, 3L]])
+            assert res == [[1, 2, 1, 3, 3]]
 
             # Multi-partitions queries without aggregates
             res = rows_to_list(session.execute("SELECT a, b, s FROM test WHERE a IN (1, 2, 3, 4) GROUP BY a"))
-            self.assertEqual(res, [[1, 2, 1],
-                                   [2, 2, 2],
-                                   [3, None, 3],
-                                   [4, 8, None]])
+            assert res == [[1, 2, 1],
+                           [2, 2, 2],
+                           [3, None, 3],
+                           [4, 8, None]]
 
             res = rows_to_list(session.execute("SELECT a, b, s FROM test WHERE a IN (1, 2, 3, 4) GROUP BY a, b"))
-            self.assertEqual(res, [[1, 2, 1],
-                                   [1, 4, 1],
-                                   [2, 2, 2],
-                                   [2, 4, 2],
-                                   [3, None, 3],
-                                   [4, 8, None]])
+            assert res == [[1, 2, 1],
+                           [1, 4, 1],
+                           [2, 2, 2],
+                           [2, 4, 2],
+                           [3, None, 3],
+                           [4, 8, None]]
 
             # Multi-partitions queries with LIMIT
             res = rows_to_list(session.execute(
                 "SELECT a, b, s, count(b), count(s) FROM test WHERE a IN (1, 2, 3, 4) GROUP BY a LIMIT 2"))
-            self.assertEqual(res, [[1, 2, 1, 4L, 4L],
-                                   [2, 2, 2, 2L, 2L]])
+            assert res == [[1, 2, 1, 4, 4],
+                           [2, 2, 2, 2, 2]]
 
             res = rows_to_list(
                 session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a IN (1, 2, 3, 4) LIMIT 2"))
-            self.assertEqual(res, [[1, 2, 1, 7L, 7L]])
+            assert res == [[1, 2, 1, 7, 7]]
 
             # Multi-partitions queries without aggregates and with LIMIT
             res = rows_to_list(session.execute("SELECT a, b, s FROM test WHERE a IN (1, 2, 3, 4) GROUP BY a LIMIT 2"))
-            self.assertEqual(res, [[1, 2, 1],
-                                   [2, 2, 2]])
+            assert res == [[1, 2, 1],
+                           [2, 2, 2]]
 
             res = rows_to_list(
                 session.execute("SELECT a, b, s FROM test WHERE a IN (1, 2, 3, 4) GROUP BY a, b LIMIT 10"))
-            self.assertEqual(res, [[1, 2, 1],
-                                   [1, 4, 1],
-                                   [2, 2, 2],
-                                   [2, 4, 2],
-                                   [3, None, 3],
-                                   [4, 8, None]])
+            assert res == [[1, 2, 1],
+                           [1, 4, 1],
+                           [2, 2, 2],
+                           [2, 4, 2],
+                           [3, None, 3],
+                           [4, 8, None]]
 
             # Multi-partitions queries with PER PARTITION LIMIT
             res = rows_to_list(
                 session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a IN (1, 2, 3, 4) GROUP BY a PER PARTITION LIMIT 1"))
-            self.assertEqual(res, [[1, 2, 1, 4L, 4L],
-                                   [2, 2, 2, 2L, 2L],
-                                   [3, None, 3, 0L, 1L],
-                                   [4, 8, None, 1L, 0L]])
+            assert res == [[1, 2, 1, 4, 4],
+                           [2, 2, 2, 2, 2],
+                           [3, None, 3, 0, 1],
+                           [4, 8, None, 1, 0]]
 
             res = rows_to_list(
                 session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a IN (1, 2, 3, 4) GROUP BY a, b PER PARTITION LIMIT 2"))
-            self.assertEqual(res, [[1, 2, 1, 2L, 2L],
-                                   [1, 4, 1, 2L, 2L],
-                                   [2, 2, 2, 1L, 1L],
-                                   [2, 4, 2, 1L, 1L],
-                                   [3, None, 3, 0L, 1L],
-                                   [4, 8, None, 1L, 0L]])
+            assert res == [[1, 2, 1, 2, 2],
+                           [1, 4, 1, 2, 2],
+                           [2, 2, 2, 1, 1],
+                           [2, 4, 2, 1, 1],
+                           [3, None, 3, 0, 1],
+                           [4, 8, None, 1, 0]]
 
             res = rows_to_list(
                 session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a IN (1, 2, 3, 4) GROUP BY a, b PER PARTITION LIMIT 1"))
-            self.assertEqual(res, [[1, 2, 1, 2L, 2L],
-                                   [2, 2, 2, 1L, 1L],
-                                   [3, None, 3, 0L, 1L],
-                                   [4, 8, None, 1L, 0L]])
+            assert res == [[1, 2, 1, 2, 2],
+                           [2, 2, 2, 1, 1],
+                           [3, None, 3, 0, 1],
+                           [4, 8, None, 1, 0]]
 
             # Multi-partitions queries with DISTINCT
             res = rows_to_list(session.execute(
                 "SELECT DISTINCT a, s, count(a), count(s) FROM test WHERE a IN (1, 2, 3, 4) GROUP BY a"))
-            self.assertEqual(res, [[1, 1, 1L, 1L],
-                                   [2, 2, 1L, 1L],
-                                   [3, 3, 1L, 1L],
-                                   [4, None, 1L, 0L]])
+            assert res == [[1, 1, 1, 1],
+                           [2, 2, 1, 1],
+                           [3, 3, 1, 1],
+                           [4, None, 1, 0]]
 
             # Multi-partitions queries with PER PARTITION LIMIT and LIMIT
             res = rows_to_list(
                 session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a IN (1, 2, 3, 4) GROUP BY a PER PARTITION LIMIT 1 LIMIT 3"))
-            self.assertEqual(res, [[1, 2, 1, 4L, 4L],
-                                   [2, 2, 2, 2L, 2L],
-                                   [3, None, 3, 0L, 1L]])
+            assert res == [[1, 2, 1, 4, 4],
+                           [2, 2, 2, 2, 2],
+                           [3, None, 3, 0, 1]]
 
             res = rows_to_list(
                 session.execute("SELECT a, b, s, count(b), count(s) FROM test WHERE a IN (1, 2, 3, 4) GROUP BY a, b PER PARTITION LIMIT 2 LIMIT 3"))
-            self.assertEqual(res, [[1, 2, 1, 2L, 2L],
-                                   [1, 4, 1, 2L, 2L],
-                                   [2, 2, 2, 1L, 1L]])
+            assert res == [[1, 2, 1, 2, 2],
+                           [1, 4, 1, 2, 2],
+                           [2, 2, 2, 1, 1]]
 
             res = rows_to_list(
                 session.execute("SELECT DISTINCT a, s, count(a), count(s) FROM test WHERE a IN (1, 2, 3, 4)"))
-            self.assertEqual(res, [[1, 1, 4L, 3L]])
+            assert res == [[1, 1, 4, 3]]
 
             # Multi-partitions query with DISTINCT and LIMIT
             res = rows_to_list(session.execute(
                 "SELECT DISTINCT a, s, count(a), count(s) FROM test WHERE a IN (1, 2, 3, 4) GROUP BY a LIMIT 2"))
-            self.assertEqual(res, [[1, 1, 1L, 1L],
-                                   [2, 2, 1L, 1L]])
+            assert res == [[1, 1, 1, 1],
+                           [2, 2, 1, 1]]
 
             res = rows_to_list(
                 session.execute("SELECT DISTINCT a, s, count(a), count(s) FROM test WHERE a IN (1, 2, 3, 4) LIMIT 2"))
-            self.assertEqual(res, [[1, 1, 4L, 3L]])
+            assert res == [[1, 1, 4, 3]]
 
     @since('2.0.6')
-    def static_columns_paging_test(self):
+    def test_static_columns_paging(self):
         """
         Exercises paging with static columns to detect bugs
         @jira_ticket CASSANDRA-8502.
         """
-
         session = self.prepare(row_factory=named_tuple_factory)
         create_ks(session, 'test_paging_static_cols', 2)
         session.execute("CREATE TABLE test (a int, b int, c int, s1 int static, s2 int static, PRIMARY KEY (a, b))")
@@ -1472,186 +1472,186 @@ class TestPagingData(BasePagingTester, PageAssertionMixin):
         PAGE_SIZES = (2, 3, 4, 5, 15, 16, 17, 100)
 
         for page_size in PAGE_SIZES:
-            debug("Current page size is {}".format(page_size))
+            logger.debug("Current page size is {}".format(page_size))
             session.default_fetch_size = page_size
             for selector in selectors:
                 results = list(session.execute("SELECT %s FROM test" % selector))
                 assert_length_equal(results, 16)
-                self.assertEqual([0] * 4 + [1] * 4 + [2] * 4 + [3] * 4, sorted([r.a for r in results]))
-                self.assertEqual([0, 1, 2, 3] * 4, [r.b for r in results])
-                self.assertEqual([0, 1, 2, 3] * 4, [r.c for r in results])
+                assert [0] * 4 + [1] * 4 + [2] * 4 + [3] * 4 == sorted([r.a for r in results])
+                assert [0, 1, 2, 3] * 4 == [r.b for r in results]
+                assert [0, 1, 2, 3] * 4 == [r.c for r in results]
                 if "s1" in selector:
-                    self.assertEqual([17] * 16, [r.s1 for r in results])
+                    assert [17] * 16 == [r.s1 for r in results]
                 if "s2" in selector:
-                    self.assertEqual([42] * 16, [r.s2 for r in results])
+                    assert [42] * 16 == [r.s2 for r in results]
 
         # IN over the partitions
         for page_size in PAGE_SIZES:
-            debug("Current page size is {}".format(page_size))
+            logger.debug("Current page size is {}".format(page_size))
             session.default_fetch_size = page_size
             for selector in selectors:
                 results = list(session.execute("SELECT %s FROM test WHERE a IN (0, 1, 2, 3)" % selector))
                 assert_length_equal(results, 16)
-                self.assertEqual([0] * 4 + [1] * 4 + [2] * 4 + [3] * 4, sorted([r.a for r in results]))
-                self.assertEqual([0, 1, 2, 3] * 4, [r.b for r in results])
-                self.assertEqual([0, 1, 2, 3] * 4, [r.c for r in results])
+                assert [0] * 4 + [1] * 4 + [2] * 4 + [3] * 4 == sorted([r.a for r in results])
+                assert [0, 1, 2, 3] * 4 == [r.b for r in results]
+                assert [0, 1, 2, 3] * 4 == [r.c for r in results]
                 if "s1" in selector:
-                    self.assertEqual([17] * 16, [r.s1 for r in results])
+                    assert [17] * 16 == [r.s1 for r in results]
                 if "s2" in selector:
-                    self.assertEqual([42] * 16, [r.s2 for r in results])
+                    assert [42] * 16 == [r.s2 for r in results]
 
         # single partition
         for i in range(16):
             session.execute("INSERT INTO test (a, b, c, s1, s2) VALUES (%d, %d, %d, %d, %d)" % (99, i, i, 17, 42))
 
         for page_size in PAGE_SIZES:
-            debug("Current page size is {}".format(page_size))
+            logger.debug("Current page size is {}".format(page_size))
             session.default_fetch_size = page_size
             for selector in selectors:
                 results = list(session.execute("SELECT %s FROM test WHERE a = 99" % selector))
                 assert_length_equal(results, 16)
-                self.assertEqual([99] * 16, [r.a for r in results])
-                self.assertEqual(range(16), [r.b for r in results])
-                self.assertEqual(range(16), [r.c for r in results])
+                assert [99] * 16 == [r.a for r in results]
+                assert list(range(16)) == [r.b for r in results]
+                assert list(range(16)) == [r.c for r in results]
                 if "s1" in selector:
-                    self.assertEqual([17] * 16, [r.s1 for r in results])
+                    assert [17] * 16 == [r.s1 for r in results]
                 if "s2" in selector:
-                    self.assertEqual([42] * 16, [r.s2 for r in results])
+                    assert [42] * 16 == [r.s2 for r in results]
 
         # reversed
         for page_size in PAGE_SIZES:
-            debug("Current page size is {}".format(page_size))
+            logger.debug("Current page size is {}".format(page_size))
             session.default_fetch_size = page_size
             for selector in selectors:
                 results = list(session.execute("SELECT %s FROM test WHERE a = 99 ORDER BY b DESC" % selector))
                 assert_length_equal(results, 16)
-                self.assertEqual([99] * 16, [r.a for r in results])
-                self.assertEqual(list(reversed(range(16))), [r.b for r in results])
-                self.assertEqual(list(reversed(range(16))), [r.c for r in results])
+                assert [99] * 16 == [r.a for r in results]
+                assert list(reversed(list(range(16)))) == [r.b for r in results]
+                assert list(reversed(list(range(16)))) == [r.c for r in results]
                 if "s1" in selector:
-                    self.assertEqual([17] * 16, [r.s1 for r in results])
+                    assert [17] * 16 == [r.s1 for r in results]
                 if "s2" in selector:
-                    self.assertEqual([42] * 16, [r.s2 for r in results])
+                    assert [42] * 16 == [r.s2 for r in results]
 
         # IN on clustering column
         for page_size in PAGE_SIZES:
-            debug("Current page size is {}".format(page_size))
+            logger.debug("Current page size is {}".format(page_size))
             session.default_fetch_size = page_size
             for selector in selectors:
                 results = list(session.execute("SELECT %s FROM test WHERE a = 99 AND b IN (3, 4, 8, 14, 15)" % selector))
                 assert_length_equal(results, 5)
-                self.assertEqual([99] * 5, [r.a for r in results])
-                self.assertEqual([3, 4, 8, 14, 15], [r.b for r in results])
-                self.assertEqual([3, 4, 8, 14, 15], [r.c for r in results])
+                assert [99] * 5 == [r.a for r in results]
+                assert [3, 4, 8, 14, 15] == [r.b for r in results]
+                assert [3, 4, 8, 14, 15] == [r.c for r in results]
                 if "s1" in selector:
-                    self.assertEqual([17] * 5, [r.s1 for r in results])
+                    assert [17] * 5 == [r.s1 for r in results]
                 if "s2" in selector:
-                    self.assertEqual([42] * 5, [r.s2 for r in results])
+                    assert [42] * 5 == [r.s2 for r in results]
 
         # reversed IN on clustering column
         for page_size in PAGE_SIZES:
-            debug("Current page size is {}".format(page_size))
+            logger.debug("Current page size is {}".format(page_size))
             session.default_fetch_size = page_size
             for selector in selectors:
                 results = list(session.execute("SELECT %s FROM test WHERE a = 99 AND b IN (3, 4, 8, 14, 15) ORDER BY b DESC" % selector))
                 assert_length_equal(results, 5)
-                self.assertEqual([99] * 5, [r.a for r in results])
-                self.assertEqual(list(reversed([3, 4, 8, 14, 15])), [r.b for r in results])
-                self.assertEqual(list(reversed([3, 4, 8, 14, 15])), [r.c for r in results])
+                assert [99] * 5 == [r.a for r in results]
+                assert list(reversed([3, 4, 8, 14, 15])) == [r.b for r in results]
+                assert list(reversed([3, 4, 8, 14, 15])) == [r.c for r in results]
                 if "s1" in selec

<TRUNCATED>

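Taken together, the hunks above apply a handful of mechanical Python 2 -> 3
conversions: long literals such as 4L become plain ints, self.assertEqual
becomes a bare assert, range() results are materialized with list() before
being compared to lists, and the module-level debug() helper gives way to the
stdlib logger. A condensed, standalone sketch of the same patterns
(illustrative values):

    import logging

    logger = logging.getLogger(__name__)

    res = [[2, 2, 2, 1, 1]]            # Python 3 has no long type, so 1L is just 1
    assert res == [[2, 2, 2, 1, 1]]    # plain assert replaces self.assertEqual

    # range() is lazy in Python 3; materialize it before comparing to a list.
    assert list(range(4)) == [0, 1, 2, 3]

    page_size = 100
    logger.debug("Current page size is {}".format(page_size))  # replaces debug()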

[10/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/thrift_bindings/v22/ttypes.py
----------------------------------------------------------------------
diff --git a/thrift_bindings/v22/ttypes.py b/thrift_bindings/v22/ttypes.py
deleted file mode 100644
index 4cbbd67..0000000
--- a/thrift_bindings/v22/ttypes.py
+++ /dev/null
@@ -1,4219 +0,0 @@
-#
-# Autogenerated by Thrift Compiler (0.9.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-#  options string: py
-#
-
-from thrift.protocol import TBinaryProtocol, TProtocol
-from thrift.Thrift import (TApplicationException, TException, TMessageType,
-                           TType)
-from thrift.transport import TTransport
-
-try:
-  from thrift.protocol import fastbinary
-except:
-  fastbinary = None
-
-
-class ConsistencyLevel:
-  """
-  The ConsistencyLevel is an enum that controls both read and write
-  behavior based on the ReplicationFactor of the keyspace.  The
-  different consistency levels have different meanings, depending on
-  if you're doing a write or read operation.
-
-  If W + R > ReplicationFactor, where W is the number of nodes to
-  block for on write, and R the number to block for on reads, you
-  will have strongly consistent behavior; that is, readers will
-  always see the most recent write. Of these, the most interesting is
-  to do QUORUM reads and writes, which gives you consistency while
-  still allowing availability in the face of node failures up to half
-  of <ReplicationFactor>. Of course if latency is more important than
-  consistency then you can use lower values for either or both.
-
-  Some ConsistencyLevels (ONE, TWO, THREE) refer to a specific number
-  of replicas rather than a logical concept that adjusts
-  automatically with the replication factor.  Of these, only ONE is
-  commonly used; TWO and (even more rarely) THREE are only useful
-  when you care more about guaranteeing a certain level of
-  durability, than consistency.
-
-  Write consistency levels make the following guarantees before reporting success to the client:
-    ANY          Ensure that the write has been written once somewhere, including possibly being hinted in a non-target node.
-    ONE          Ensure that the write has been written to at least 1 node's commit log and memory table
-    TWO          Ensure that the write has been written to at least 2 node's commit log and memory table
-    THREE        Ensure that the write has been written to at least 3 node's commit log and memory table
-    QUORUM       Ensure that the write has been written to <ReplicationFactor> / 2 + 1 nodes
-    LOCAL_ONE    Ensure that the write has been written to 1 node within the local datacenter (requires NetworkTopologyStrategy)
-    LOCAL_QUORUM Ensure that the write has been written to <ReplicationFactor> / 2 + 1 nodes, within the local datacenter (requires NetworkTopologyStrategy)
-    EACH_QUORUM  Ensure that the write has been written to <ReplicationFactor> / 2 + 1 nodes in each datacenter (requires NetworkTopologyStrategy)
-    ALL          Ensure that the write is written to <code>&lt;ReplicationFactor&gt;</code> nodes before responding to the client.
-
-  Read consistency levels make the following guarantees before returning successful results to the client:
-    ANY          Not supported. You probably want ONE instead.
-    ONE          Returns the record obtained from a single replica.
-    TWO          Returns the record with the most recent timestamp once two replicas have replied.
-    THREE        Returns the record with the most recent timestamp once three replicas have replied.
-    QUORUM       Returns the record with the most recent timestamp once a majority of replicas have replied.
-    LOCAL_ONE    Returns the record with the most recent timestamp once a single replica within the local datacenter has replied.
-    LOCAL_QUORUM Returns the record with the most recent timestamp once a majority of replicas within the local datacenter have replied.
-    EACH_QUORUM  Returns the record with the most recent timestamp once a majority of replicas within each datacenter have replied.
-    ALL          Returns the record with the most recent timestamp once all replicas have replied (implies no replica may be down).
-  """
-  ONE = 1
-  QUORUM = 2
-  LOCAL_QUORUM = 3
-  EACH_QUORUM = 4
-  ALL = 5
-  ANY = 6
-  TWO = 7
-  THREE = 8
-  SERIAL = 9
-  LOCAL_SERIAL = 10
-  LOCAL_ONE = 11
-
-  _VALUES_TO_NAMES = {
-    1: "ONE",
-    2: "QUORUM",
-    3: "LOCAL_QUORUM",
-    4: "EACH_QUORUM",
-    5: "ALL",
-    6: "ANY",
-    7: "TWO",
-    8: "THREE",
-    9: "SERIAL",
-    10: "LOCAL_SERIAL",
-    11: "LOCAL_ONE",
-  }
-
-  _NAMES_TO_VALUES = {
-    "ONE": 1,
-    "QUORUM": 2,
-    "LOCAL_QUORUM": 3,
-    "EACH_QUORUM": 4,
-    "ALL": 5,
-    "ANY": 6,
-    "TWO": 7,
-    "THREE": 8,
-    "SERIAL": 9,
-    "LOCAL_SERIAL": 10,
-    "LOCAL_ONE": 11,
-  }
-
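
The W + R > ReplicationFactor rule described in the docstring above is easy to
check numerically; a quick sketch (editorial illustration, not part of the
deleted file):

    # QUORUM is <ReplicationFactor> / 2 + 1, per the tables above.
    rf = 3
    quorum = rf // 2 + 1    # 2 for rf = 3
    w = r = quorum
    assert w + r > rf       # 2 + 2 > 3: every quorum read overlaps the last quorum write

    # ONE + ONE does not satisfy the rule, so reads may miss recent writes.
    assert not (1 + 1 > rf)
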
-class IndexOperator:
-  EQ = 0
-  GTE = 1
-  GT = 2
-  LTE = 3
-  LT = 4
-
-  _VALUES_TO_NAMES = {
-    0: "EQ",
-    1: "GTE",
-    2: "GT",
-    3: "LTE",
-    4: "LT",
-  }
-
-  _NAMES_TO_VALUES = {
-    "EQ": 0,
-    "GTE": 1,
-    "GT": 2,
-    "LTE": 3,
-    "LT": 4,
-  }
-
-class IndexType:
-  KEYS = 0
-  CUSTOM = 1
-  COMPOSITES = 2
-
-  _VALUES_TO_NAMES = {
-    0: "KEYS",
-    1: "CUSTOM",
-    2: "COMPOSITES",
-  }
-
-  _NAMES_TO_VALUES = {
-    "KEYS": 0,
-    "CUSTOM": 1,
-    "COMPOSITES": 2,
-  }
-
-class Compression:
-  """
-  CQL query compression
-  """
-  GZIP = 1
-  NONE = 2
-
-  _VALUES_TO_NAMES = {
-    1: "GZIP",
-    2: "NONE",
-  }
-
-  _NAMES_TO_VALUES = {
-    "GZIP": 1,
-    "NONE": 2,
-  }
-
-class CqlResultType:
-  ROWS = 1
-  VOID = 2
-  INT = 3
-
-  _VALUES_TO_NAMES = {
-    1: "ROWS",
-    2: "VOID",
-    3: "INT",
-  }
-
-  _NAMES_TO_VALUES = {
-    "ROWS": 1,
-    "VOID": 2,
-    "INT": 3,
-  }
-
-
-class Column:
-  """
-  Basic unit of data within a ColumnFamily.
-  @param name, the name by which this column is set and retrieved.  Maximum 64KB long.
-  @param value. The data associated with the name.  Maximum 2GB long, but in practice you should limit it to small numbers of MB (since Thrift must read the full value into memory to operate on it).
-  @param timestamp. The timestamp is used for conflict detection/resolution when two columns with the same name need to be compared.
-  @param ttl. An optional, positive delay (in seconds) after which the column will be automatically deleted.
-
-  Attributes:
-   - name
-   - value
-   - timestamp
-   - ttl
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'name', None, None, ), # 1
-    (2, TType.STRING, 'value', None, None, ), # 2
-    (3, TType.I64, 'timestamp', None, None, ), # 3
-    (4, TType.I32, 'ttl', None, None, ), # 4
-  )
-
-  def __init__(self, name=None, value=None, timestamp=None, ttl=None,):
-    self.name = name
-    self.value = value
-    self.timestamp = timestamp
-    self.ttl = ttl
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.name = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.STRING:
-          self.value = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.I64:
-          self.timestamp = iprot.readI64();
-        else:
-          iprot.skip(ftype)
-      elif fid == 4:
-        if ftype == TType.I32:
-          self.ttl = iprot.readI32();
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('Column')
-    if self.name is not None:
-      oprot.writeFieldBegin('name', TType.STRING, 1)
-      oprot.writeString(self.name)
-      oprot.writeFieldEnd()
-    if self.value is not None:
-      oprot.writeFieldBegin('value', TType.STRING, 2)
-      oprot.writeString(self.value)
-      oprot.writeFieldEnd()
-    if self.timestamp is not None:
-      oprot.writeFieldBegin('timestamp', TType.I64, 3)
-      oprot.writeI64(self.timestamp)
-      oprot.writeFieldEnd()
-    if self.ttl is not None:
-      oprot.writeFieldBegin('ttl', TType.I32, 4)
-      oprot.writeI32(self.ttl)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.name is None:
-      raise TProtocol.TProtocolException(message='Required field name is unset!')
-    return
-
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
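
Per the docstring, the generated struct is a plain value object: only name is
required, and validate() enforces that. A hypothetical usage sketch (field
values invented for illustration):

    col = Column(name=b'email', value=b'a@example.com',
                 timestamp=1517260204000000,  # client-supplied; used for conflict resolution
                 ttl=3600)                    # optional: expire after one hour
    col.validate()                            # raises TProtocolException if name is unset
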
-class SuperColumn:
-  """
-  A named list of columns.
-  @param name. see Column.name.
-  @param columns. A collection of standard Columns.  The columns within a super column are defined in an ad hoc manner.
-                  Columns within a super column do not have to have matching structures (similarly named child columns).
-
-  Attributes:
-   - name
-   - columns
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'name', None, None, ), # 1
-    (2, TType.LIST, 'columns', (TType.STRUCT,(Column, Column.thrift_spec)), None, ), # 2
-  )
-
-  def __init__(self, name=None, columns=None,):
-    self.name = name
-    self.columns = columns
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.name = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.LIST:
-          self.columns = []
-          (_etype3, _size0) = iprot.readListBegin()
-          for _i4 in xrange(_size0):
-            _elem5 = Column()
-            _elem5.read(iprot)
-            self.columns.append(_elem5)
-          iprot.readListEnd()
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('SuperColumn')
-    if self.name is not None:
-      oprot.writeFieldBegin('name', TType.STRING, 1)
-      oprot.writeString(self.name)
-      oprot.writeFieldEnd()
-    if self.columns is not None:
-      oprot.writeFieldBegin('columns', TType.LIST, 2)
-      oprot.writeListBegin(TType.STRUCT, len(self.columns))
-      for iter6 in self.columns:
-        iter6.write(oprot)
-      oprot.writeListEnd()
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.name is None:
-      raise TProtocol.TProtocolException(message='Required field name is unset!')
-    if self.columns is None:
-      raise TProtocol.TProtocolException(message='Required field columns is unset!')
-    return
-
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class CounterColumn:
-  """
-  Attributes:
-   - name
-   - value
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'name', None, None, ), # 1
-    (2, TType.I64, 'value', None, None, ), # 2
-  )
-
-  def __init__(self, name=None, value=None,):
-    self.name = name
-    self.value = value
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.name = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.I64:
-          self.value = iprot.readI64();
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('CounterColumn')
-    if self.name is not None:
-      oprot.writeFieldBegin('name', TType.STRING, 1)
-      oprot.writeString(self.name)
-      oprot.writeFieldEnd()
-    if self.value is not None:
-      oprot.writeFieldBegin('value', TType.I64, 2)
-      oprot.writeI64(self.value)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.name is None:
-      raise TProtocol.TProtocolException(message='Required field name is unset!')
-    if self.value is None:
-      raise TProtocol.TProtocolException(message='Required field value is unset!')
-    return
-
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class CounterSuperColumn:
-  """
-  Attributes:
-   - name
-   - columns
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'name', None, None, ), # 1
-    (2, TType.LIST, 'columns', (TType.STRUCT,(CounterColumn, CounterColumn.thrift_spec)), None, ), # 2
-  )
-
-  def __init__(self, name=None, columns=None,):
-    self.name = name
-    self.columns = columns
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.name = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.LIST:
-          self.columns = []
-          (_etype10, _size7) = iprot.readListBegin()
-          for _i11 in xrange(_size7):
-            _elem12 = CounterColumn()
-            _elem12.read(iprot)
-            self.columns.append(_elem12)
-          iprot.readListEnd()
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('CounterSuperColumn')
-    if self.name is not None:
-      oprot.writeFieldBegin('name', TType.STRING, 1)
-      oprot.writeString(self.name)
-      oprot.writeFieldEnd()
-    if self.columns is not None:
-      oprot.writeFieldBegin('columns', TType.LIST, 2)
-      oprot.writeListBegin(TType.STRUCT, len(self.columns))
-      for iter13 in self.columns:
-        iter13.write(oprot)
-      oprot.writeListEnd()
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.name is None:
-      raise TProtocol.TProtocolException(message='Required field name is unset!')
-    if self.columns is None:
-      raise TProtocol.TProtocolException(message='Required field columns is unset!')
-    return
-
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class ColumnOrSuperColumn:
-  """
-  Methods for fetching rows/records from Cassandra will return either a single instance of ColumnOrSuperColumn or a list
-  of ColumnOrSuperColumns (get_slice()). If you're looking up a SuperColumn (or list of SuperColumns) then the resulting
-  instances of ColumnOrSuperColumn will have the requested SuperColumn in the attribute super_column. For queries resulting
-  in Columns, those values will be in the attribute column. This change was made between 0.3 and 0.4 to standardize on
-  single query methods that may return either a SuperColumn or Column.
-
-  If the query was on a counter column family, you will either get a counter_column (instead of a column) or a
-  counter_super_column (instead of a super_column)
-
-  @param column. The Column returned by get() or get_slice().
-  @param super_column. The SuperColumn returned by get() or get_slice().
-  @param counter_column. The CounterColumn returned by get() or get_slice().
-  @param counter_super_column. The CounterSuperColumn returned by get() or get_slice().
-
-  Attributes:
-   - column
-   - super_column
-   - counter_column
-   - counter_super_column
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRUCT, 'column', (Column, Column.thrift_spec), None, ), # 1
-    (2, TType.STRUCT, 'super_column', (SuperColumn, SuperColumn.thrift_spec), None, ), # 2
-    (3, TType.STRUCT, 'counter_column', (CounterColumn, CounterColumn.thrift_spec), None, ), # 3
-    (4, TType.STRUCT, 'counter_super_column', (CounterSuperColumn, CounterSuperColumn.thrift_spec), None, ), # 4
-  )
-
-  def __init__(self, column=None, super_column=None, counter_column=None, counter_super_column=None,):
-    self.column = column
-    self.super_column = super_column
-    self.counter_column = counter_column
-    self.counter_super_column = counter_super_column
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRUCT:
-          self.column = Column()
-          self.column.read(iprot)
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.STRUCT:
-          self.super_column = SuperColumn()
-          self.super_column.read(iprot)
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.STRUCT:
-          self.counter_column = CounterColumn()
-          self.counter_column.read(iprot)
-        else:
-          iprot.skip(ftype)
-      elif fid == 4:
-        if ftype == TType.STRUCT:
-          self.counter_super_column = CounterSuperColumn()
-          self.counter_super_column.read(iprot)
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('ColumnOrSuperColumn')
-    if self.column is not None:
-      oprot.writeFieldBegin('column', TType.STRUCT, 1)
-      self.column.write(oprot)
-      oprot.writeFieldEnd()
-    if self.super_column is not None:
-      oprot.writeFieldBegin('super_column', TType.STRUCT, 2)
-      self.super_column.write(oprot)
-      oprot.writeFieldEnd()
-    if self.counter_column is not None:
-      oprot.writeFieldBegin('counter_column', TType.STRUCT, 3)
-      self.counter_column.write(oprot)
-      oprot.writeFieldEnd()
-    if self.counter_super_column is not None:
-      oprot.writeFieldBegin('counter_super_column', TType.STRUCT, 4)
-      self.counter_super_column.write(oprot)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    return
-
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
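
As the docstring explains, exactly one of the four attributes is populated,
depending on what was queried; a small sketch (illustrative values):

    result = ColumnOrSuperColumn(column=Column(name=b'age', value=b'42',
                                               timestamp=1517260204000000))
    assert result.column is not None        # a plain-column lookup fills 'column'...
    assert result.super_column is None      # ...and leaves the other three slots unset
    assert result.counter_column is None
    assert result.counter_super_column is None
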
-class NotFoundException(TException):
-  """
-  A specific column was requested that does not exist.
-  """
-
-  thrift_spec = (
-  )
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('NotFoundException')
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    return
-
-
-  def __str__(self):
-    return repr(self)
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class InvalidRequestException(TException):
-  """
-  Invalid request could mean keyspace or column family does not exist, required parameters are missing, or a parameter is malformed.
-  why contains an associated error message.
-
-  Attributes:
-   - why
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'why', None, None, ), # 1
-  )
-
-  def __init__(self, why=None,):
-    self.why = why
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.why = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('InvalidRequestException')
-    if self.why is not None:
-      oprot.writeFieldBegin('why', TType.STRING, 1)
-      oprot.writeString(self.why)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.why is None:
-      raise TProtocol.TProtocolException(message='Required field why is unset!')
-    return
-
-
-  def __str__(self):
-    return repr(self)
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class UnavailableException(TException):
-  """
-  Not all the replicas required could be created and/or read.
-  """
-
-  thrift_spec = (
-  )
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('UnavailableException')
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    return
-
-
-  def __str__(self):
-    return repr(self)
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class TimedOutException(TException):
-  """
-  RPC timeout was exceeded. Either a node failed mid-operation, or load was too high, or the requested op was too large.
-
-  Attributes:
-   - acknowledged_by: if a write operation was acknowledged by some replicas but not by enough to
-  satisfy the required ConsistencyLevel, the number of successful
-  replies will be given here. In the case of the atomic_batch_mutate method, this field
-  will be set to -1 if the batch was written to the batchlog and to 0 if it wasn't.
-   - acknowledged_by_batchlog: in the case of the atomic_batch_mutate method, this field tells whether the batch
-  was written to the batchlog.
-   - paxos_in_progress: for the CAS method, this field tells whether we timed out during the paxos
-  protocol, as opposed to during the commit of our update
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.I32, 'acknowledged_by', None, None, ), # 1
-    (2, TType.BOOL, 'acknowledged_by_batchlog', None, None, ), # 2
-    (3, TType.BOOL, 'paxos_in_progress', None, None, ), # 3
-  )
-
-  def __init__(self, acknowledged_by=None, acknowledged_by_batchlog=None, paxos_in_progress=None,):
-    self.acknowledged_by = acknowledged_by
-    self.acknowledged_by_batchlog = acknowledged_by_batchlog
-    self.paxos_in_progress = paxos_in_progress
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.I32:
-          self.acknowledged_by = iprot.readI32();
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.BOOL:
-          self.acknowledged_by_batchlog = iprot.readBool();
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.BOOL:
-          self.paxos_in_progress = iprot.readBool();
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('TimedOutException')
-    if self.acknowledged_by is not None:
-      oprot.writeFieldBegin('acknowledged_by', TType.I32, 1)
-      oprot.writeI32(self.acknowledged_by)
-      oprot.writeFieldEnd()
-    if self.acknowledged_by_batchlog is not None:
-      oprot.writeFieldBegin('acknowledged_by_batchlog', TType.BOOL, 2)
-      oprot.writeBool(self.acknowledged_by_batchlog)
-      oprot.writeFieldEnd()
-    if self.paxos_in_progress is not None:
-      oprot.writeFieldBegin('paxos_in_progress', TType.BOOL, 3)
-      oprot.writeBool(self.paxos_in_progress)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    return
-
-
-  def __str__(self):
-    return repr(self)
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
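
The acknowledged_by field carries different meanings per the docstring above; a
short sketch of how a client might interpret it for atomic_batch_mutate
(values illustrative):

    ex = TimedOutException(acknowledged_by=-1)
    if ex.acknowledged_by == -1:
        outcome = "batch reached the batchlog and will eventually be applied"
    elif ex.acknowledged_by == 0:
        outcome = "batch never reached the batchlog"
    else:
        outcome = "acknowledged by %d replicas" % ex.acknowledged_by
    assert outcome.startswith("batch reached")
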
-class AuthenticationException(TException):
-  """
-  invalid authentication request (invalid keyspace, user does not exist, or credentials invalid)
-
-  Attributes:
-   - why
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'why', None, None, ), # 1
-  )
-
-  def __init__(self, why=None,):
-    self.why = why
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.why = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('AuthenticationException')
-    if self.why is not None:
-      oprot.writeFieldBegin('why', TType.STRING, 1)
-      oprot.writeString(self.why)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.why is None:
-      raise TProtocol.TProtocolException(message='Required field why is unset!')
-    return
-
-
-  def __str__(self):
-    return repr(self)
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class AuthorizationException(TException):
-  """
-  invalid authorization request (user does not have access to keyspace)
-
-  Attributes:
-   - why
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'why', None, None, ), # 1
-  )
-
-  def __init__(self, why=None,):
-    self.why = why
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.why = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('AuthorizationException')
-    if self.why is not None:
-      oprot.writeFieldBegin('why', TType.STRING, 1)
-      oprot.writeString(self.why)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.why is None:
-      raise TProtocol.TProtocolException(message='Required field why is unset!')
-    return
-
-
-  def __str__(self):
-    return repr(self)
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class SchemaDisagreementException(TException):
-  """
-  NOTE: This outdated exception is left for backward compatibility reasons;
-  no actual schema agreement validation is done starting from Cassandra 1.2
-
-  schemas are not in agreement across all nodes
-  """
-
-  thrift_spec = (
-  )
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('SchemaDisagreementException')
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    return
-
-
-  def __str__(self):
-    return repr(self)
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class ColumnParent:
-  """
-  ColumnParent is used when selecting groups of columns from the same ColumnFamily. In directory structure terms, imagine
-  ColumnParent as ColumnPath + '/../'.
-
-  See also <a href="cassandra.html#Struct_ColumnPath">ColumnPath</a>
-
-  Attributes:
-   - column_family
-   - super_column
-  """
-
-  thrift_spec = (
-    None, # 0
-    None, # 1
-    None, # 2
-    (3, TType.STRING, 'column_family', None, None, ), # 3
-    (4, TType.STRING, 'super_column', None, None, ), # 4
-  )
-
-  def __init__(self, column_family=None, super_column=None,):
-    self.column_family = column_family
-    self.super_column = super_column
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 3:
-        if ftype == TType.STRING:
-          self.column_family = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 4:
-        if ftype == TType.STRING:
-          self.super_column = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('ColumnParent')
-    if self.column_family is not None:
-      oprot.writeFieldBegin('column_family', TType.STRING, 3)
-      oprot.writeString(self.column_family)
-      oprot.writeFieldEnd()
-    if self.super_column is not None:
-      oprot.writeFieldBegin('super_column', TType.STRING, 4)
-      oprot.writeString(self.super_column)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.column_family is None:
-      raise TProtocol.TProtocolException(message='Required field column_family is unset!')
-    return
-
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class ColumnPath:
-  """
-  The ColumnPath is the path to a single column in Cassandra. It might make sense to think of ColumnPath and
-  ColumnParent in terms of a directory structure.
-
-  ColumnPath is used to look up a single column.
-
-  @param column_family. The name of the CF of the column being looked up.
-  @param super_column. The super column name.
-  @param column. The column name.
-
-  Attributes:
-   - column_family
-   - super_column
-   - column
-  """
-
-  thrift_spec = (
-    None, # 0
-    None, # 1
-    None, # 2
-    (3, TType.STRING, 'column_family', None, None, ), # 3
-    (4, TType.STRING, 'super_column', None, None, ), # 4
-    (5, TType.STRING, 'column', None, None, ), # 5
-  )
-
-  def __init__(self, column_family=None, super_column=None, column=None,):
-    self.column_family = column_family
-    self.super_column = super_column
-    self.column = column
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 3:
-        if ftype == TType.STRING:
-          self.column_family = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 4:
-        if ftype == TType.STRING:
-          self.super_column = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 5:
-        if ftype == TType.STRING:
-          self.column = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('ColumnPath')
-    if self.column_family is not None:
-      oprot.writeFieldBegin('column_family', TType.STRING, 3)
-      oprot.writeString(self.column_family)
-      oprot.writeFieldEnd()
-    if self.super_column is not None:
-      oprot.writeFieldBegin('super_column', TType.STRING, 4)
-      oprot.writeString(self.super_column)
-      oprot.writeFieldEnd()
-    if self.column is not None:
-      oprot.writeFieldBegin('column', TType.STRING, 5)
-      oprot.writeString(self.column)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.column_family is None:
-      raise TProtocol.TProtocolException(message='Required field column_family is unset!')
-    return
-
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
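
A brief sketch of the ColumnParent/ColumnPath distinction in use; `client` is
assumed to be an open Thrift connection, and the row/column names are
illustrative only:

    # ColumnPath names exactly one column, for point reads like get().
    path = ColumnPath(column_family='cf1', column='c1')
    cosc = client.get('row1', path, ConsistencyLevel.ONE)

    # ColumnParent scopes a group of columns, e.g. for get_slice().
    parent = ColumnParent(column_family='cf1')
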
-class SliceRange:
-  """
-  A slice range is a structure that stores basic range, ordering and limit information for a query that will return
-  multiple columns. It could be thought of as Cassandra's version of LIMIT and ORDER BY.
-
-  @param start. The column name to start the slice with. This attribute is not required, though there is no default value,
-                and can be safely set to '', i.e., an empty byte array, to start with the first column name. Otherwise, it
-                must be a valid value under the rules of the Comparator defined for the given ColumnFamily.
-  @param finish. The column name to stop the slice at. This attribute is not required, though there is no default value,
-                 and can be safely set to an empty byte array to not stop until 'count' results are seen. Otherwise, it
-                 must also be a valid value for the ColumnFamily Comparator.
-  @param reversed. Whether the results should be ordered in reversed order. Similar to ORDER BY blah DESC in SQL.
-  @param count. How many columns to return. Similar to LIMIT in SQL. May be arbitrarily large, but Thrift will
-                materialize the whole result into memory before returning it to the client, so be aware that you may
-                be better served by iterating through slices by passing the last value of one call in as the 'start'
-                of the next instead of increasing 'count' arbitrarily.
-
-  Attributes:
-   - start
-   - finish
-   - reversed
-   - count
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'start', None, None, ), # 1
-    (2, TType.STRING, 'finish', None, None, ), # 2
-    (3, TType.BOOL, 'reversed', None, False, ), # 3
-    (4, TType.I32, 'count', None, 100, ), # 4
-  )
-
-  def __init__(self, start=None, finish=None, reversed=thrift_spec[3][4], count=thrift_spec[4][4],):
-    self.start = start
-    self.finish = finish
-    self.reversed = reversed
-    self.count = count
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.start = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.STRING:
-          self.finish = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.BOOL:
-          self.reversed = iprot.readBool();
-        else:
-          iprot.skip(ftype)
-      elif fid == 4:
-        if ftype == TType.I32:
-          self.count = iprot.readI32();
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('SliceRange')
-    if self.start is not None:
-      oprot.writeFieldBegin('start', TType.STRING, 1)
-      oprot.writeString(self.start)
-      oprot.writeFieldEnd()
-    if self.finish is not None:
-      oprot.writeFieldBegin('finish', TType.STRING, 2)
-      oprot.writeString(self.finish)
-      oprot.writeFieldEnd()
-    if self.reversed is not None:
-      oprot.writeFieldBegin('reversed', TType.BOOL, 3)
-      oprot.writeBool(self.reversed)
-      oprot.writeFieldEnd()
-    if self.count is not None:
-      oprot.writeFieldBegin('count', TType.I32, 4)
-      oprot.writeI32(self.count)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.start is None:
-      raise TProtocol.TProtocolException(message='Required field start is unset!')
-    if self.finish is None:
-      raise TProtocol.TProtocolException(message='Required field finish is unset!')
-    if self.reversed is None:
-      raise TProtocol.TProtocolException(message='Required field reversed is unset!')
-    if self.count is None:
-      raise TProtocol.TProtocolException(message='Required field count is unset!')
-    return
-
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class SlicePredicate:
-  """
-  A SlicePredicate is similar to a mathematical predicate (see http://en.wikipedia.org/wiki/Predicate_(mathematical_logic)),
-  which is described as "a property that the elements of a set have in common."
-
-  SlicePredicates in Cassandra are described with either a list of column_names or a SliceRange.  If column_names is
-  specified, slice_range is ignored.
-
-  @param column_names. A list of column names to retrieve. This can be used similarly to Memcached's "multi-get" feature
-                      to fetch N known column names. For instance, if you know you wish to fetch columns 'Joe', 'Jack',
-                      and 'Jim' you can pass those column names as a list to fetch all three at once.
-  @param slice_range. A SliceRange describing how to range, order, and/or limit the slice.
-
-  Attributes:
-   - column_names
-   - slice_range
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.LIST, 'column_names', (TType.STRING,None), None, ), # 1
-    (2, TType.STRUCT, 'slice_range', (SliceRange, SliceRange.thrift_spec), None, ), # 2
-  )
-
-  def __init__(self, column_names=None, slice_range=None,):
-    self.column_names = column_names
-    self.slice_range = slice_range
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.LIST:
-          self.column_names = []
-          (_etype17, _size14) = iprot.readListBegin()
-          for _i18 in xrange(_size14):
-            _elem19 = iprot.readString();
-            self.column_names.append(_elem19)
-          iprot.readListEnd()
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.STRUCT:
-          self.slice_range = SliceRange()
-          self.slice_range.read(iprot)
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('SlicePredicate')
-    if self.column_names is not None:
-      oprot.writeFieldBegin('column_names', TType.LIST, 1)
-      oprot.writeListBegin(TType.STRING, len(self.column_names))
-      for iter20 in self.column_names:
-        oprot.writeString(iter20)
-      oprot.writeListEnd()
-      oprot.writeFieldEnd()
-    if self.slice_range is not None:
-      oprot.writeFieldBegin('slice_range', TType.STRUCT, 2)
-      self.slice_range.write(oprot)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    return
-
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
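
A minimal sketch of the pagination pattern the SliceRange docstring above
recommends, combined with SlicePredicate. The `client` (an open Thrift
connection), `key` and `parent` names are illustrative assumptions, and the
row is assumed to hold regular (non-super, non-counter) columns:

    def iter_columns(client, key, parent, page_size=100):
        # Page through one row's columns instead of using one huge 'count'.
        start = ''
        while True:
            predicate = SlicePredicate(slice_range=SliceRange(
                start=start, finish='', reversed=False, count=page_size))
            page = client.get_slice(key, parent, predicate, ConsistencyLevel.ONE)
            # 'start' is inclusive, so every page after the first repeats the
            # previous page's last column; drop the duplicate.
            for cosc in (page[1:] if start else page):
                yield cosc
            if len(page) < page_size:
                break  # a short page means the row is exhausted
            start = page[-1].column.name
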
-class IndexExpression:
-  """
-  Attributes:
-   - column_name
-   - op
-   - value
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'column_name', None, None, ), # 1
-    (2, TType.I32, 'op', None, None, ), # 2
-    (3, TType.STRING, 'value', None, None, ), # 3
-  )
-
-  def __init__(self, column_name=None, op=None, value=None,):
-    self.column_name = column_name
-    self.op = op
-    self.value = value
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.column_name = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.I32:
-          self.op = iprot.readI32();
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.STRING:
-          self.value = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('IndexExpression')
-    if self.column_name is not None:
-      oprot.writeFieldBegin('column_name', TType.STRING, 1)
-      oprot.writeString(self.column_name)
-      oprot.writeFieldEnd()
-    if self.op is not None:
-      oprot.writeFieldBegin('op', TType.I32, 2)
-      oprot.writeI32(self.op)
-      oprot.writeFieldEnd()
-    if self.value is not None:
-      oprot.writeFieldBegin('value', TType.STRING, 3)
-      oprot.writeString(self.value)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.column_name is None:
-      raise TProtocol.TProtocolException(message='Required field column_name is unset!')
-    if self.op is None:
-      raise TProtocol.TProtocolException(message='Required field op is unset!')
-    if self.value is None:
-      raise TProtocol.TProtocolException(message='Required field value is unset!')
-    return
-
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class IndexClause:
-  """
-  @deprecated use a KeyRange with row_filter in get_range_slices instead
-
-  Attributes:
-   - expressions
-   - start_key
-   - count
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.LIST, 'expressions', (TType.STRUCT,(IndexExpression, IndexExpression.thrift_spec)), None, ), # 1
-    (2, TType.STRING, 'start_key', None, None, ), # 2
-    (3, TType.I32, 'count', None, 100, ), # 3
-  )
-
-  def __init__(self, expressions=None, start_key=None, count=thrift_spec[3][4],):
-    self.expressions = expressions
-    self.start_key = start_key
-    self.count = count
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.LIST:
-          self.expressions = []
-          (_etype24, _size21) = iprot.readListBegin()
-          for _i25 in xrange(_size21):
-            _elem26 = IndexExpression()
-            _elem26.read(iprot)
-            self.expressions.append(_elem26)
-          iprot.readListEnd()
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.STRING:
-          self.start_key = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.I32:
-          self.count = iprot.readI32();
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('IndexClause')
-    if self.expressions is not None:
-      oprot.writeFieldBegin('expressions', TType.LIST, 1)
-      oprot.writeListBegin(TType.STRUCT, len(self.expressions))
-      for iter27 in self.expressions:
-        iter27.write(oprot)
-      oprot.writeListEnd()
-      oprot.writeFieldEnd()
-    if self.start_key is not None:
-      oprot.writeFieldBegin('start_key', TType.STRING, 2)
-      oprot.writeString(self.start_key)
-      oprot.writeFieldEnd()
-    if self.count is not None:
-      oprot.writeFieldBegin('count', TType.I32, 3)
-      oprot.writeI32(self.count)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.expressions is None:
-      raise TProtocol.TProtocolException(message='Required field expressions is unset!')
-    if self.start_key is None:
-      raise TProtocol.TProtocolException(message='Required field start_key is unset!')
-    if self.count is None:
-      raise TProtocol.TProtocolException(message='Required field count is unset!')
-    return
-
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class KeyRange:
-  """
-  The semantics of start keys and tokens are slightly different.
-  Keys are start-inclusive; tokens are start-exclusive.  Token
-  ranges may also wrap -- that is, the end token may be less
-  than the start one.  Thus, a range from keyX to keyX is a
-  one-element range, but a range from tokenY to tokenY is the
-  full ring.
-
-  Attributes:
-   - start_key
-   - end_key
-   - start_token
-   - end_token
-   - row_filter
-   - count
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'start_key', None, None, ), # 1
-    (2, TType.STRING, 'end_key', None, None, ), # 2
-    (3, TType.STRING, 'start_token', None, None, ), # 3
-    (4, TType.STRING, 'end_token', None, None, ), # 4
-    (5, TType.I32, 'count', None, 100, ), # 5
-    (6, TType.LIST, 'row_filter', (TType.STRUCT,(IndexExpression, IndexExpression.thrift_spec)), None, ), # 6
-  )
-
-  def __init__(self, start_key=None, end_key=None, start_token=None, end_token=None, row_filter=None, count=thrift_spec[5][4],):
-    self.start_key = start_key
-    self.end_key = end_key
-    self.start_token = start_token
-    self.end_token = end_token
-    self.row_filter = row_filter
-    self.count = count
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.start_key = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.STRING:
-          self.end_key = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.STRING:
-          self.start_token = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 4:
-        if ftype == TType.STRING:
-          self.end_token = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 6:
-        if ftype == TType.LIST:
-          self.row_filter = []
-          (_etype31, _size28) = iprot.readListBegin()
-          for _i32 in xrange(_size28):
-            _elem33 = IndexExpression()
-            _elem33.read(iprot)
-            self.row_filter.append(_elem33)
-          iprot.readListEnd()
-        else:
-          iprot.skip(ftype)
-      elif fid == 5:
-        if ftype == TType.I32:
-          self.count = iprot.readI32();
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('KeyRange')
-    if self.start_key is not None:
-      oprot.writeFieldBegin('start_key', TType.STRING, 1)
-      oprot.writeString(self.start_key)
-      oprot.writeFieldEnd()
-    if self.end_key is not None:
-      oprot.writeFieldBegin('end_key', TType.STRING, 2)
-      oprot.writeString(self.end_key)
-      oprot.writeFieldEnd()
-    if self.start_token is not None:
-      oprot.writeFieldBegin('start_token', TType.STRING, 3)
-      oprot.writeString(self.start_token)
-      oprot.writeFieldEnd()
-    if self.end_token is not None:
-      oprot.writeFieldBegin('end_token', TType.STRING, 4)
-      oprot.writeString(self.end_token)
-      oprot.writeFieldEnd()
-    if self.count is not None:
-      oprot.writeFieldBegin('count', TType.I32, 5)
-      oprot.writeI32(self.count)
-      oprot.writeFieldEnd()
-    if self.row_filter is not None:
-      oprot.writeFieldBegin('row_filter', TType.LIST, 6)
-      oprot.writeListBegin(TType.STRUCT, len(self.row_filter))
-      for iter34 in self.row_filter:
-        iter34.write(oprot)
-      oprot.writeListEnd()
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.count is None:
-      raise TProtocol.TProtocolException(message='Required field count is unset!')
-    return
-
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
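
KeyRange supports the same paging idiom across rows, following the
inclusive-start-key semantics described above. A hedged sketch, again
assuming `client`, `parent` and a SlicePredicate `predicate` already exist:

    def iter_rows(client, parent, predicate, batch=100):
        start_key = ''
        while True:
            key_range = KeyRange(start_key=start_key, end_key='', count=batch)
            page = client.get_range_slices(parent, predicate, key_range,
                                           ConsistencyLevel.ONE)
            # start_key is inclusive, so pages after the first repeat a row.
            for key_slice in (page[1:] if start_key else page):
                yield key_slice
            if len(page) < batch:
                break
            start_key = page[-1].key
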
-class KeySlice:
-  """
-  A KeySlice is a key followed by the data it maps to. A collection of KeySlice is returned by the get_range_slice operation.
-
-  @param key. a row key
-  @param columns. List of data represented by the key. Typically, the list is pared down to only the columns specified by
-                  a SlicePredicate.
-
-  Attributes:
-   - key
-   - columns
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'key', None, None, ), # 1
-    (2, TType.LIST, 'columns', (TType.STRUCT,(ColumnOrSuperColumn, ColumnOrSuperColumn.thrift_spec)), None, ), # 2
-  )
-
-  def __init__(self, key=None, columns=None,):
-    self.key = key
-    self.columns = columns
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.key = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.LIST:
-          self.columns = []
-          (_etype38, _size35) = iprot.readListBegin()
-          for _i39 in xrange(_size35):
-            _elem40 = ColumnOrSuperColumn()
-            _elem40.read(iprot)
-            self.columns.append(_elem40)
-          iprot.readListEnd()
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('KeySlice')
-    if self.key is not None:
-      oprot.writeFieldBegin('key', TType.STRING, 1)
-      oprot.writeString(self.key)
-      oprot.writeFieldEnd()
-    if self.columns is not None:
-      oprot.writeFieldBegin('columns', TType.LIST, 2)
-      oprot.writeListBegin(TType.STRUCT, len(self.columns))
-      for iter41 in self.columns:
-        iter41.write(oprot)
-      oprot.writeListEnd()
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.key is None:
-      raise TProtocol.TProtocolException(message='Required field key is unset!')
-    if self.columns is None:
-      raise TProtocol.TProtocolException(message='Required field columns is unset!')
-    return
-
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class KeyCount:
-  """
-  Attributes:
-   - key
-   - count
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'key', None, None, ), # 1
-    (2, TType.I32, 'count', None, None, ), # 2
-  )
-
-  def __init__(self, key=None, count=None,):
-    self.key = key
-    self.count = count
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.key = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.I32:
-          self.count = iprot.readI32();
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('KeyCount')
-    if self.key is not None:
-      oprot.writeFieldBegin('key', TType.STRING, 1)
-      oprot.writeString(self.key)
-      oprot.writeFieldEnd()
-    if self.count is not None:
-      oprot.writeFieldBegin('count', TType.I32, 2)
-      oprot.writeI32(self.count)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.key is None:
-      raise TProtocol.TProtocolException(message='Required field key is unset!')
-    if self.count is None:
-      raise TProtocol.TProtocolException(message='Required field count is unset!')
-    return
-
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class Deletion:
-  """
-  Note that the timestamp is optional only in the case of counter deletion.
-
-  Attributes:
-   - timestamp
-   - super_column
-   - predicate
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.I64, 'timestamp', None, None, ), # 1
-    (2, TType.STRING, 'super_column', None, None, ), # 2
-    (3, TType.STRUCT, 'predicate', (SlicePredicate, SlicePredicate.thrift_spec), None, ), # 3
-  )
-
-  def __init__(self, timestamp=None, super_column=None, predicate=None,):
-    self.timestamp = timestamp
-    self.super_column = super_column
-    self.predicate = predicate
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.I64:
-          self.timestamp = iprot.readI64();
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.STRING:
-          self.super_column = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.STRUCT:
-          self.predicate = SlicePredicate()
-          self.predicate.read(iprot)
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('Deletion')
-    if self.timestamp is not None:
-      oprot.writeFieldBegin('timestamp', TType.I64, 1)
-      oprot.writeI64(self.timestamp)
-      oprot.writeFieldEnd()
-    if self.super_column is not None:
-      oprot.writeFieldBegin('super_column', TType.STRING, 2)
-      oprot.writeString(self.super_column)
-      oprot.writeFieldEnd()
-    if self.predicate is not None:
-      oprot.writeFieldBegin('predicate', TType.STRUCT, 3)
-      self.predicate.write(oprot)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    return
-
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class Mutation:
-  """
-  A Mutation is either an insert (represented by filling column_or_supercolumn) or a deletion (represented by filling the deletion attribute).
-  @param column_or_supercolumn. An insert to a column or supercolumn (possibly counter column or supercolumn)
-  @param deletion. A deletion of a column or supercolumn
-
-  Attributes:
-   - column_or_supercolumn
-   - deletion
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRUCT, 'column_or_supercolumn', (ColumnOrSuperColumn, ColumnOrSuperColumn.thrift_spec), None, ), # 1
-    (2, TType.STRUCT, 'deletion', (Deletion, Deletion.thrift_spec), None, ), # 2
-  )
-
-  def __init__(self, column_or_supercolumn=None, deletion=None,):
-    self.column_or_supercolumn = column_or_supercolumn
-    self.deletion = deletion
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRUCT:
-          self.column_or_supercolumn = ColumnOrSuperColumn()
-          self.column_or_supercolumn.read(iprot)
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.STRUCT:
-          self.deletion = Deletion()
-          self.deletion.read(iprot)
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('Mutation')
-    if self.column_or_supercolumn is not None:
-      oprot.writeFieldBegin('column_or_supercolumn', TType.STRUCT, 1)
-      self.column_or_supercolumn.write(oprot)
-      oprot.writeFieldEnd()
-    if self.deletion is not None:
-      oprot.writeFieldBegin('deletion', TType.STRUCT, 2)
-      self.deletion.write(oprot)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    return
-
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
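
Tying Deletion and Mutation together: a small illustrative batch_mutate call
that inserts one column and deletes another in the same row. The row key,
column family and `client` handle are assumed names:

    import time

    ts = int(time.time() * 1e6)  # conventional microsecond timestamps
    insert = Mutation(column_or_supercolumn=ColumnOrSuperColumn(
        column=Column(name='c1', value='v1', timestamp=ts)))
    delete = Mutation(deletion=Deletion(
        timestamp=ts, predicate=SlicePredicate(column_names=['c2'])))
    client.batch_mutate({'row1': {'cf1': [insert, delete]}},
                        ConsistencyLevel.QUORUM)
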
-class EndpointDetails:
-  """
-  Attributes:
-   - host
-   - datacenter
-   - rack
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'host', None, None, ), # 1
-    (2, TType.STRING, 'datacenter', None, None, ), # 2
-    (3, TType.STRING, 'rack', None, None, ), # 3
-  )
-
-  def __init__(self, host=None, datacenter=None, rack=None,):
-    self.host = host
-    self.datacenter = datacenter
-    self.rack = rack
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.host = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.STRING:
-          self.datacenter = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.STRING:
-          self.rack = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('EndpointDetails')
-    if self.host is not None:
-      oprot.writeFieldBegin('host', TType.STRING, 1)
-      oprot.writeString(self.host)
-      oprot.writeFieldEnd()
-    if self.datacenter is not None:
-      oprot.writeFieldBegin('datacenter', TType.STRING, 2)
-      oprot.writeString(self.datacenter)
-      oprot.writeFieldEnd()
-    if self.rack is not None:
-      oprot.writeFieldBegin('rack', TType.STRING, 3)
-      oprot.writeString(self.rack)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    return
-
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class CASResult:
-  """
-  Attributes:
-   - success
-   - current_values
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.BOOL, 'success', None, None, ), # 1
-    (2, TType.LIST, 'current_values', (TType.STRUCT,(Column, Column.thrift_spec)), None, ), # 2
-  )
-
-  def __init__(self, success=None, current_values=None,):
-    self.success = success
-    self.current_values = current_values
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.BOOL:
-          self.success = iprot.readBool();
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.LIST:
-          self.current_values = []
-          (_etype45, _size42) = iprot.readListBegin()
-          for _i46 in xrange(_size42):
-            _elem47 = Column()
-            _elem47.read(iprot)
-            self.current_values.append(_elem47)
-          iprot.readListEnd()
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('CASResult')
-    if self.success is not None:
-      oprot.writeFieldBegin('success', TType.BOOL, 1)
-      oprot.writeBool(self.success)
-      oprot.writeFieldEnd()
-    if self.current_values is not None:
-      oprot.writeFieldBegin('current_values', TType.LIST, 2)
-      oprot.writeListBegin(TType.STRUCT, len(self.current_values))
-      for iter48 in self.current_values:
-        iter48.write(oprot)
-      oprot.writeListEnd()
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.success is None:
-      raise TProtocol.TProtocolException(message='Required field success is unset!')
-    return
-
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class TokenRange:
-  """
-  A TokenRange describes part of the Cassandra ring: it is a mapping from a range to
-  endpoints responsible for that range.
-  @param start_token The first token in the range
-  @param end_token The last token in the range
-  @param endpoints The endpoints responsible for the range (listed by their configured listen_address)
-  @param rpc_endpoints The endpoints responsible for the range (listed by their configured rpc_address)
-
-  Attributes:
-   - start_token
-   - end_token
-   - endpoints
-   - rpc_endpoints
-   - endpoint_details
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'start_token', None, None, ), # 1
-    (2, TType.STRING, 'end_token', None, None, ), # 2
-    (3, TType.LIST, 'endpoints', (TType.STRING,None), None, ), # 3
-    (4, TType.LIST, 'rpc_endpoints', (TType.STRING,None), None, ), # 4
-    (5, TType.LIST, 'endpoint_details', (TType.STRUCT,(EndpointDetails, EndpointDetails.thrift_spec)), None, ), # 5
-  )
-
-  def __init__(self, start_token=None, end_token=None, endpoints=None, rpc_endpoints=None, endpoint_details=None,):
-    self.start_token = start_token
-    self.end_token = end_token
-    self.endpoints = endpoints
-    self.rpc_endpoints = rpc_endpoints
-    self.endpoint_details = endpoint_details
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.start_token = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.STRING:
-          self.end_token = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.LIST:
-          self.endpoints = []
-          (_etype52, _size49) = iprot.readListBegin()
-          for _i53 in xrange(_size49):
-            _elem54 = iprot.readString();
-            self.endpoints.append(_elem54)
-          iprot.readListEnd()
-        else:
-          iprot.skip(ftype)
-      elif fid == 4:
-        if ftype == TType.LIST:
-          self.rpc_endpoints = []
-          (_etype58, _size55) = iprot.readListBegin()
-          for _i59 in xrange(_size55):
-            _elem60 = iprot.readString();
-            self.rpc_endpoints.append(_elem60)
-          iprot.readListEnd()
-        else:
-          iprot.skip(ftype)
-      elif fid == 5:
-        if ftype == TType.LIST:
-          self.endpoint_details = []
-          (_etype64, _size61) = iprot.readListBegin()
-          for _i65 in xrange(_size61):
-            _elem66 = EndpointDetails()
-            _elem66.read(iprot)
-            self.endpoint_details.append(_elem66)
-          iprot.readListEnd()
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('TokenRange')
-    if self.start_token is not None:
-      oprot.writeFieldBegin('start_token', TType.STRING, 1)
-      oprot.writeString(self.start_token)
-      oprot.writeFieldEnd()
-    if self.end_token is not None:
-      oprot.writeFieldBegin('end_token', TType.STRING, 2)
-      oprot.writeString(self.end_token)
-      oprot.writeFieldEnd()
-    if self.endpoints is not None:
-      oprot.writeFieldBegin('endpoints', TType.LIST, 3)
-      oprot.writeListBegin(TType.STRING, len(self.endpoints))
-      for iter67 in self.endpoints:
-        oprot.writeString(iter67)
-      oprot.writeListEnd()
-      oprot.writeFieldEnd()
-    if self.rpc_endpoints is not None:
-      oprot.writeFieldBegin('rpc_endpoints', TType.LIST, 4)
-      oprot.writeListBegin(TType.STRING, len(self.rpc_endpoints))
-      for iter68 in self.rpc_endpoints:
-        oprot.writeString(iter68)
-      oprot.writeListEnd()
-      oprot.writeFieldEnd()
-    if self.endpoint_details is not None:
-      oprot.writeFieldBegin('endpoint_details', TType.LIST, 5)
-      oprot.writeListBegin(TType.STRUCT, len(self.endpoint_details))
-      for iter69 in self.endpoint_details:
-        iter69.write(oprot)
-      oprot.writeListEnd()
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.start_token is None:
-      raise TProtocol.TProtocolException(message='Required field start_token is unset!')
-    if self.end_token is None:
-      raise TProtocol.TProtocolException(message='Required field end_token is unset!')
-    if self.endpoints is None:
-      raise TProtocol.TProtocolException(message='Required field endpoints is unset!')
-    return
-
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
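
TokenRange values are what describe_ring returns; a quick sketch of reading
them, assuming a connected `client` and an existing keyspace named 'ks':

    for token_range in client.describe_ring('ks'):
        print('%s -> %s: %s' % (token_range.start_token,
                                token_range.end_token,
                                ', '.join(token_range.endpoints)))
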
-class AuthenticationRequest:
-  """
-  Authentication requests can contain any data, depending on the IAuthenticator used.
-
-  Attributes:
-   - credentials
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.MAP, 'credentials', (TType.STRING,None,TType.STRING,None), None, ), # 1
-  )
-
-  def __init__(self, credentials=None,):
-    self.credentials = credentials
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.MAP:
-          self.credentials = {}
-          (_ktype71, _vtype72, _size70) = iprot.readMapBegin()
-          for _i74 in xrange(_size70):
-            _key75 = iprot.readString();
-            _val76 = iprot.readString();
-            self.credentials[_key75] = _val76
-          iprot.readMapEnd()
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('AuthenticationRequest')
-    if self.credentials is not None:
-      oprot.writeFieldBegin('credentials', TType.MAP, 1)
-      oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.credentials))
-      for kiter77,viter78 in self.credentials.items():
-        oprot.writeString(kiter77)
-        oprot.writeString(viter78)
-      oprot.writeMapEnd()
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.credentials is None:
-      raise TProtocol.TProtocolException(message='Required field credentials is unset!')
-    return
-
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class ColumnDef:
-  """
-  Attributes:
-   - name
-   - validation_class
-   - index_type
-   - index_name
-   - index_options
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'name', None, None, ), # 1
-    (2, TType.STRING, 'validation_class', None, None, ), # 2
-    (3, TType.I32, 'index_type', None, None, ), # 3
-    (4, TType.STRING, 'index_name', None, None, ), # 4
-    (5, TType.MAP, 'index_options', (TType.STRING,None,TType.STRING,None), None, ), # 5
-  )
-
-  def __init__(self, name=None, validation_class=None, index_type=None, index_name=None, index_options=None,):
-    self.name = name
-    self.validation_class = validation_class
-    self.index_type = index_type
-    self.index_name = index_name
-    self.index_options = index_options
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.name = iprot.readString();
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.STRING:
-          self.validation_class = iprot.readStr

<TRUNCATED>


[20/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/repair_tests/repair_test.py
----------------------------------------------------------------------
diff --git a/repair_tests/repair_test.py b/repair_tests/repair_test.py
index 238871f..59910e0 100644
--- a/repair_tests/repair_test.py
+++ b/repair_tests/repair_test.py
@@ -3,18 +3,21 @@ import os.path
 import threading
 import time
 import re
+import pytest
+import logging
+
 from collections import namedtuple
 from threading import Thread
-from unittest import skip, skipIf
 
 from cassandra import ConsistencyLevel
 from cassandra.query import SimpleStatement
 from ccmlib.node import ToolError
-from nose.plugins.attrib import attr
 
-from dtest import CASSANDRA_VERSION_FROM_BUILD, FlakyRetryPolicy, Tester, debug, create_ks, create_cf
+from dtest import CASSANDRA_VERSION_FROM_BUILD, FlakyRetryPolicy, Tester, create_ks, create_cf
 from tools.data import insert_c1c2, query_c1c2
-from tools.decorators import no_vnodes, since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 def _repair_options(version, ks='', cf=None, sequential=True):
@@ -46,7 +49,6 @@ def _repair_options(version, ks='', cf=None, sequential=True):
 
 
 class BaseRepairTest(Tester):
-    __test__ = False
 
     def check_rows_on_node(self, node_to_check, rows, found=None, missings=None, restart=True):
         """
@@ -64,14 +66,14 @@ class BaseRepairTest(Tester):
             missings = []
         stopped_nodes = []
 
-        for node in self.cluster.nodes.values():
+        for node in list(self.cluster.nodes.values()):
             if node.is_running() and node is not node_to_check:
                 stopped_nodes.append(node)
                 node.stop(wait_other_notice=True)
 
         session = self.patient_exclusive_cql_connection(node_to_check, 'ks')
-        result = list(session.execute("SELECT * FROM cf LIMIT {}".format(rows * 2)))
-        self.assertEqual(len(result), rows)
+        result = list(session.execute("SELECT * FROM cf LIMIT {}".format(rows * 2), timeout=10))
+        assert len(result) == rows
 
         for k in found:
             query_c1c2(session, k, ConsistencyLevel.ONE)
@@ -79,7 +81,7 @@ class BaseRepairTest(Tester):
         for k in missings:
             query = SimpleStatement("SELECT c1, c2 FROM cf WHERE key='k{}'".format(k), consistency_level=ConsistencyLevel.ONE)
             res = list(session.execute(query))
-            self.assertEqual(len(filter(lambda x: len(x) != 0, res)), 0, res)
+            assert len([x for x in res if len(x) != 0]) == 0, res
 
         if restart:
             for node in stopped_nodes:
@@ -92,7 +94,7 @@ class BaseRepairTest(Tester):
         # interfere with the test (this must be after the populate)
         cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
         cluster.set_batch_commitlog(enabled=True)
-        debug("Starting cluster..")
+        logger.debug("Starting cluster..")
         cluster.populate(3).start()
         node1, node2, node3 = cluster.nodelist()
 
@@ -101,13 +103,13 @@ class BaseRepairTest(Tester):
         create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})
 
         # Insert 1000 keys, kill node 3, insert 1 key, restart node 3, insert 1000 more keys
-        debug("Inserting data...")
+        logger.debug("Inserting data...")
         insert_c1c2(session, n=1000, consistency=ConsistencyLevel.ALL)
         node3.flush()
         node3.stop(wait_other_notice=True)
         insert_c1c2(session, keys=(1000, ), consistency=ConsistencyLevel.TWO)
         node3.start(wait_other_notice=True, wait_for_binary_proto=True)
-        insert_c1c2(session, keys=range(1001, 2001), consistency=ConsistencyLevel.ALL)
+        insert_c1c2(session, keys=list(range(1001, 2001)), consistency=ConsistencyLevel.ALL)
 
         cluster.flush()
 
@@ -116,46 +118,46 @@ class BaseRepairTest(Tester):
         node1, node2, node3 = cluster.nodelist()
 
         # Verify that node3 has only 2000 keys
-        debug("Checking data on node3...")
+        logger.debug("Checking data on node3...")
         self.check_rows_on_node(node3, 2000, missings=[1000])
 
         # Verify that node1 has 2001 keys
-        debug("Checking data on node1...")
+        logger.debug("Checking data on node1...")
         self.check_rows_on_node(node1, 2001, found=[1000])
 
         # Verify that node2 has 2001 keys
-        debug("Checking data on node2...")
+        logger.debug("Checking data on node2...")
         self.check_rows_on_node(node2, 2001, found=[1000])
 
         time.sleep(10)  # see CASSANDRA-4373
         # Run repair
         start = time.time()
-        debug("starting repair...")
+        logger.debug("starting repair...")
         node1.repair(_repair_options(self.cluster.version(), ks='ks', sequential=sequential))
-        debug("Repair time: {end}".format(end=time.time() - start))
+        logger.debug("Repair time: {end}".format(end=time.time() - start))
 
         # Validate that only one range was transferred
         out_of_sync_logs = node1.grep_log("{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex()))
 
-        self.assertEqual(len(out_of_sync_logs), 2, "Lines matching: " + str([elt[0] for elt in out_of_sync_logs]))
+        assert len(out_of_sync_logs) == 2, "Lines matching: " + str([elt[0] for elt in out_of_sync_logs])
 
         valid_out_of_sync_pairs = [{node1.address(), node3.address()},
                                    {node2.address(), node3.address()}]
 
         for line, m in out_of_sync_logs:
             num_out_of_sync_ranges, out_of_sync_nodes = m.group(3), {m.group(1), m.group(2)}
-            self.assertEqual(int(num_out_of_sync_ranges), 1, "Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, line))
-            self.assertIn(out_of_sync_nodes, valid_out_of_sync_pairs, str(out_of_sync_nodes))
+            assert int(num_out_of_sync_ranges) == 1, \
+                "Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, num_out_of_sync_ranges)
+            assert out_of_sync_nodes in valid_out_of_sync_pairs, str(out_of_sync_nodes)
 
         # Check node3 now has the key
         self.check_rows_on_node(node3, 2001, found=[1000], restart=False)
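
A note on the membership assertions in this hunk: unittest's
self.assertIn(a, b, msg) translates to "assert a in b, msg" under pytest.
Writing "assert a, b in str(a)" instead parses as a bare truthiness assert
whose message is the expression "b in str(a)", so it can never fail on a
non-empty set. A minimal sketch of the two forms:

    # unittest style:
    self.assertIn(out_of_sync_nodes, valid_out_of_sync_pairs, str(out_of_sync_nodes))
    # equivalent pytest style -- the membership test is the asserted expression:
    assert out_of_sync_nodes in valid_out_of_sync_pairs, str(out_of_sync_nodes)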
 
 
 class TestRepair(BaseRepairTest):
-    __test__ = True
 
-    @since('2.2.1', '4')
-    def no_anticompaction_after_dclocal_repair_test(self):
+    @since('2.2.1', max_version='4')
+    def test_no_anticompaction_after_dclocal_repair(self):
         """
         * Launch a four node, two DC cluster
         * Start a -local repair on node1 in dc1
@@ -166,44 +168,44 @@ class TestRepair(BaseRepairTest):
         @jira_ticket CASSANDRA-10422
         """
         cluster = self.cluster
-        debug("Starting cluster..")
+        logger.debug("Starting cluster..")
         cluster.populate([2, 2]).start(wait_for_binary_proto=True)
         node1_1, node2_1, node1_2, node2_2 = cluster.nodelist()
         node1_1.stress(stress_options=['write', 'n=50K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=4)', '-rate', 'threads=50'])
         node1_1.nodetool("repair -local keyspace1 standard1")
-        self.assertTrue(node1_1.grep_log("Not a global repair"))
-        self.assertTrue(node2_1.grep_log("Not a global repair"))
+        assert node1_1.grep_log("Not a global repair")
+        assert node2_1.grep_log("Not a global repair")
 
         # dc2 should not see these messages:
-        self.assertFalse(node1_2.grep_log("Not a global repair"))
-        self.assertFalse(node2_2.grep_log("Not a global repair"))
+        assert not node1_2.grep_log("Not a global repair")
+        assert not node2_2.grep_log("Not a global repair")
 
         # and no nodes should do anticompaction:
         for node in cluster.nodelist():
-            self.assertFalse(node.grep_log("Starting anticompaction"))
+            assert not node.grep_log("Starting anticompaction")
 
-    @skipIf(CASSANDRA_VERSION_FROM_BUILD == '3.9', "Test doesn't run on 3.9")
-    def nonexistent_table_repair_test(self):
+    @pytest.mark.skipif(CASSANDRA_VERSION_FROM_BUILD == '3.9', reason="Test doesn't run on 3.9")
+    def test_nonexistent_table_repair(self):
         """
         * Check that repairing a non-existent table fails
         @jira_ticket CASSANDRA-12279
         """
-        self.ignore_log_patterns = [r'Unknown keyspace/cf pair']
+        self.fixture_dtest_setup.ignore_log_patterns = [r'Unknown keyspace/cf pair']
         cluster = self.cluster
-        debug('Starting nodes')
+        logger.debug('Starting nodes')
         cluster.populate(2).start(wait_for_binary_proto=True)
         node1, _ = cluster.nodelist()
-        debug('Creating keyspace and tables')
+        logger.debug('Creating keyspace and tables')
         node1.stress(stress_options=['write', 'n=1', 'no-warmup',
                                      'cl=ONE', '-schema', 'replication(factor=2)',
                                      '-rate', 'threads=1'])
-        debug('Repairing non-existent table')
+        logger.debug('Repairing non-existent table')
 
         def repair_non_existent_table():
             global nodetool_error
             try:
                 node1.nodetool('repair keyspace1 standard2')
-            except Exception, e:
+            except Exception as e:
                 nodetool_error = e
 
         # Launch in an external thread so it does not hang the process
@@ -211,20 +213,20 @@ class TestRepair(BaseRepairTest):
         t.start()
 
         t.join(timeout=60)
-        self.assertFalse(t.isAlive(), 'Repair thread on inexistent table is still running')
+        assert not t.is_alive(), 'Repair thread on inexistent table is still running'
 
         if self.cluster.version() >= '2.2':
             node1.watch_log_for("Unknown keyspace/cf pair", timeout=60)
         # Repair only finishes with error status after CASSANDRA-12508 on 3.0+
         if self.cluster.version() >= '3.0':
-            self.assertTrue('nodetool_error' in globals() and isinstance(nodetool_error, ToolError),
-                            'Repair thread on inexistent table did not throw exception')
-            debug(nodetool_error.message)
-            self.assertTrue('Unknown keyspace/cf pair' in nodetool_error.message,
-                            'Repair thread on inexistent table did not detect inexistent table.')
+            assert 'nodetool_error' in globals() and isinstance(nodetool_error, ToolError), \
+                'Repair thread on inexistent table did not throw exception'
+            logger.debug(repr(nodetool_error))
+            assert 'Unknown keyspace/cf pair' in repr(nodetool_error),\
+                'Repair thread on inexistent table did not detect inexistent table.'
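
The thread-plus-join(timeout) pattern used here keeps a hanging nodetool
invocation from blocking the test process. A self-contained sketch of the
same idea (run_with_timeout is a hypothetical helper, not part of the
dtest API):

    import threading

    def run_with_timeout(fn, timeout=60):
        """Run fn in a daemon thread; return (finished, error)."""
        errors = []

        def target():
            try:
                fn()
            except Exception as e:
                errors.append(e)

        t = threading.Thread(target=target, daemon=True)
        t.start()
        t.join(timeout)
        return not t.is_alive(), (errors[0] if errors else None)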
 
-    @since('2.2.1', '4')
-    def no_anticompaction_after_hostspecific_repair_test(self):
+    @since('2.2.1', max_version='4')
+    def test_no_anticompaction_after_hostspecific_repair(self):
         """
         * Launch a four node, two DC cluster
         * Start a repair on all nodes, by enumerating with -hosts
@@ -234,18 +236,18 @@ class TestRepair(BaseRepairTest):
         @jira_ticket CASSANDRA-10422
         """
         cluster = self.cluster
-        debug("Starting cluster..")
+        logger.debug("Starting cluster..")
         cluster.populate([2, 2]).start(wait_for_binary_proto=True)
         node1_1, node2_1, node1_2, node2_2 = cluster.nodelist()
         node1_1.stress(stress_options=['write', 'n=100K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=4)', '-rate', 'threads=50'])
         node1_1.nodetool("repair -hosts 127.0.0.1,127.0.0.2,127.0.0.3,127.0.0.4 keyspace1 standard1")
         for node in cluster.nodelist():
-            self.assertTrue(node.grep_log("Not a global repair"))
+            assert node.grep_log("Not a global repair")
         for node in cluster.nodelist():
-            self.assertFalse(node.grep_log("Starting anticompaction"))
+            assert not node.grep_log("Starting anticompaction")
 
-    @since('2.2.4', '4')
-    def no_anticompaction_after_subrange_repair_test(self):
+    @since('2.2.4', max_version='4')
+    def test_no_anticompaction_after_subrange_repair(self):
         """
         * Launch a three node, two DC cluster
         * Start a repair on a token range
@@ -255,15 +257,15 @@ class TestRepair(BaseRepairTest):
         @jira_ticket CASSANDRA-10422
         """
         cluster = self.cluster
-        debug("Starting cluster..")
+        logger.debug("Starting cluster..")
         cluster.populate(3).start(wait_for_binary_proto=True)
         node1, node2, node3 = cluster.nodelist()
         node1.stress(stress_options=['write', 'n=50K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=3)', '-rate', 'threads=50'])
         node1.nodetool("repair -st 0 -et 1000 keyspace1 standard1")
         for node in cluster.nodelist():
-            self.assertTrue(node.grep_log("Not a global repair"))
+            assert node.grep_log("Not a global repair")
         for node in cluster.nodelist():
-            self.assertFalse(node.grep_log("Starting anticompaction"))
+            assert not node.grep_log("Starting anticompaction")
 
     def _get_repaired_data(self, node, keyspace):
         """
@@ -276,17 +278,17 @@ class TestRepair(BaseRepairTest):
         out = node.run_sstablemetadata(keyspace=keyspace).stdout
 
         def matches(pattern):
-            return filter(None, [pattern.match(l) for l in out.split('\n')])
+            return [_f for _f in [pattern.match(l) for l in out.decode("utf-8").split('\n')] if _f]
 
         names = [m.group(1) for m in matches(_sstable_name)]
         repaired_times = [int(m.group(1)) for m in matches(_repaired_at)]
 
-        self.assertTrue(names)
-        self.assertTrue(repaired_times)
+        assert names
+        assert repaired_times
         return [_sstable_data(*a) for a in zip(names, repaired_times)]
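
The _sstable_data, _sstable_name and _repaired_at helpers referenced above
are defined elsewhere in the module; a plausible sketch of their shape (the
exact regexes are assumptions, not copied from the source):

    import re
    from collections import namedtuple

    _sstable_data = namedtuple('_sstable_data', ['name', 'repaired'])
    # Patterns matched line-by-line against sstablemetadata output:
    _sstable_name = re.compile(r'^SSTable:\s*(.+)$')
    _repaired_at = re.compile(r'^Repaired at:\s*(\d+)$')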
 
-    @since('2.2.10', '4')
-    def no_anticompaction_of_already_repaired_test(self):
+    @since('2.2.10', max_version='4')
+    def test_no_anticompaction_of_already_repaired(self):
         """
         * Launch three node cluster and stress with RF2
         * Do incremental repair to have all sstables flagged as repaired
@@ -294,9 +296,8 @@ class TestRepair(BaseRepairTest):
         * Verify that none of the already repaired sstables have been anti-compacted again
         @jira_ticket CASSANDRA-13153
         """
-
         cluster = self.cluster
-        debug("Starting cluster..")
+        logger.debug("Starting cluster..")
         # disable JBOD conf since the test expects sstables to be on the same disk
         cluster.set_datadir_count(1)
         cluster.populate(3).start(wait_for_binary_proto=True)
@@ -309,7 +310,7 @@ class TestRepair(BaseRepairTest):
         node1.nodetool("repair keyspace1 standard1")
         meta = self._get_repaired_data(node1, 'keyspace1')
         repaired = set([m for m in meta if m.repaired > 0])
-        self.assertEquals(len(repaired), len(meta))
+        assert len(repaired) == len(meta)
 
         # stop node2, stress and start full repair to find out how synced ranges affect repairedAt values
         node2.stop(wait_other_notice=True)
@@ -320,10 +321,10 @@ class TestRepair(BaseRepairTest):
         meta = self._get_repaired_data(node1, 'keyspace1')
         repairedAfterFull = set([m for m in meta if m.repaired > 0])
         # already repaired sstables must remain untouched
-        self.assertEquals(repaired.intersection(repairedAfterFull), repaired)
+        assert repaired.intersection(repairedAfterFull) == repaired
 
     @since('2.2.1', '4')
-    def anticompaction_after_normal_repair_test(self):
+    def test_anticompaction_after_normal_repair(self):
         """
         * Launch a four node, two DC cluster
         * Start a normal repair
@@ -331,80 +332,80 @@ class TestRepair(BaseRepairTest):
         @jira_ticket CASSANDRA-10422
         """
         cluster = self.cluster
-        debug("Starting cluster..")
+        logger.debug("Starting cluster..")
         cluster.populate([2, 2]).start(wait_for_binary_proto=True)
         node1_1, node2_1, node1_2, node2_2 = cluster.nodelist()
         node1_1.stress(stress_options=['write', 'n=50K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=4)'])
         node1_1.nodetool("repair keyspace1 standard1")
         for node in cluster.nodelist():
-            self.assertTrue("Starting anticompaction")
+            assert "Starting anticompaction"
 
-    def simple_sequential_repair_test(self):
+    def test_simple_sequential_repair(self):
         """
         Calls simple repair test with a sequential repair
         """
         self._simple_repair(sequential=True)
 
-    def simple_parallel_repair_test(self):
+    def test_simple_parallel_repair(self):
         """
         Calls simple repair test with a parallel repair
         """
         self._simple_repair(sequential=False)
 
-    def empty_vs_gcable_sequential_repair_test(self):
+    def test_empty_vs_gcable_sequential_repair(self):
         """
         Calls empty_vs_gcable repair test with a sequential repair
         """
         self._empty_vs_gcable_no_repair(sequential=True)
 
-    def empty_vs_gcable_parallel_repair_test(self):
+    def test_empty_vs_gcable_parallel_repair(self):
         """
         Calls empty_vs_gcable repair test with a parallel repair
         """
         self._empty_vs_gcable_no_repair(sequential=False)
 
-    def range_tombstone_digest_sequential_repair_test(self):
+    def test_range_tombstone_digest_sequential_repair(self):
         """
         Calls range_tombstone_digest with a sequential repair
         """
         self._range_tombstone_digest(sequential=True)
 
-    def range_tombstone_digest_parallel_repair_test(self):
+    def test_range_tombstone_digest_parallel_repair(self):
         """
         Calls range_tombstone_digest with a parallel repair
         """
         self._range_tombstone_digest(sequential=False)
 
     @since('2.1')
-    def shadowed_cell_digest_sequential_repair_test(self):
+    def test_shadowed_cell_digest_sequential_repair(self):
         """
         Calls _cell_shadowed_by_range_tombstone with sequential repair
         """
         self._cell_shadowed_by_range_tombstone(sequential=True)
 
     @since('2.1')
-    def shadowed_cell_digest_parallel_repair_test(self):
+    def test_shadowed_cell_digest_parallel_repair(self):
         """
         Calls _cell_shadowed_by_range_tombstone with parallel repair
         """
         self._cell_shadowed_by_range_tombstone(sequential=False)
 
     @since('3.0')
-    def shadowed_range_tombstone_digest_sequential_repair_test(self):
+    def test_shadowed_range_tombstone_digest_sequential_repair(self):
         """
         Calls _range_tombstone_shadowed_by_range_tombstone with sequential repair
         """
         self._range_tombstone_shadowed_by_range_tombstone(sequential=True)
 
     @since('3.0')
-    def shadowed_range_tombstone_digest_parallel_repair_test(self):
+    def test_shadowed_range_tombstone_digest_parallel_repair(self):
         """
         Calls _range_tombstone_shadowed_by_range_tombstone with parallel repair
         """
         self._range_tombstone_shadowed_by_range_tombstone(sequential=False)
 
-    @no_vnodes()
-    def simple_repair_order_preserving_test(self):
+    @pytest.mark.no_vnodes
+    def test_simple_repair_order_preserving(self):
         """
         Calls simple repair test with OPP and sequential repair
         @jira_ticket CASSANDRA-5220
@@ -484,18 +485,18 @@ class TestRepair(BaseRepairTest):
         node2.stop(wait_other_notice=True)
         for cf in ['cf1', 'cf2']:
             # insert some data
-            for i in xrange(0, 10):
-                for j in xrange(0, 1000):
+            for i in range(0, 10):
+                for j in range(0, 1000):
                     query = SimpleStatement("INSERT INTO {} (key, c1, c2) VALUES ('k{}', 'v{}', 'value')".format(cf, i, j), consistency_level=ConsistencyLevel.ONE)
                     session.execute(query)
             node1.flush()
             # delete those data, half with row tombstone, and the rest with cell range tombstones
-            for i in xrange(0, 5):
+            for i in range(0, 5):
                 query = SimpleStatement("DELETE FROM {} WHERE key='k{}'".format(cf, i), consistency_level=ConsistencyLevel.ONE)
                 session.execute(query)
             node1.flush()
-            for i in xrange(5, 10):
-                for j in xrange(0, 1000):
+            for i in range(5, 10):
+                for j in range(0, 1000):
                     query = SimpleStatement("DELETE FROM {} WHERE key='k{}' AND c1='v{}'".format(cf, i, j), consistency_level=ConsistencyLevel.ONE)
                     session.execute(query)
             node1.flush()
@@ -509,17 +510,17 @@ class TestRepair(BaseRepairTest):
 
         # check no rows will be returned
         for cf in ['cf1', 'cf2']:
-            for i in xrange(0, 10):
+            for i in range(0, 10):
                 query = SimpleStatement("SELECT c1, c2 FROM {} WHERE key='k{}'".format(cf, i), consistency_level=ConsistencyLevel.ALL)
                 res = list(session.execute(query))
-                self.assertEqual(len(filter(lambda x: len(x) != 0, res)), 0, res)
+                assert len([x for x in res if len(x) != 0]) == 0, res
 
         # check log for no repair happened for gcable data
         out_of_sync_logs = node2.grep_log("{} and {} have ([0-9]+) range\(s\) out of sync for cf1".format(cluster.address_regex(), cluster.address_regex()))
-        self.assertEqual(len(out_of_sync_logs), 0, "GC-able data does not need to be repaired with empty data: " + str([elt[0] for elt in out_of_sync_logs]))
+        assert len(out_of_sync_logs) == 0, "GC-able data does not need to be repaired with empty data: " + str([elt[0] for elt in out_of_sync_logs])
         # check log for actual repair for non gcable data
         out_of_sync_logs = node2.grep_log("{} and {} have ([0-9]+) range\(s\) out of sync for cf2".format(cluster.address_regex(), cluster.address_regex()))
-        self.assertGreater(len(out_of_sync_logs), 0, "Non GC-able data should be repaired")
+        assert len(out_of_sync_logs) > 0, "Non GC-able data should be repaired"
 
     def _range_tombstone_digest(self, sequential):
         """
@@ -595,9 +596,9 @@ class TestRepair(BaseRepairTest):
 
         # check log for no repair happened for gcable data
         out_of_sync_logs = node2.grep_log("{} and {} have ([0-9]+) range\(s\) out of sync for table1".format(cluster.address_regex(), cluster.address_regex()))
-        self.assertEqual(len(out_of_sync_logs), 0, "Digest mismatch for range tombstone: {}".format(str([elt[0] for elt in out_of_sync_logs])))
+        assert len(out_of_sync_logs) == 0, "Digest mismatch for range tombstone: {}".format(str([elt[0] for elt in out_of_sync_logs]))
 
-    def local_dc_repair_test(self):
+    def test_local_dc_repair(self):
         """
         * Set up a multi DC cluster
         * Perform a -local repair on one DC
@@ -607,25 +608,25 @@ class TestRepair(BaseRepairTest):
         node1 = cluster.nodes["node1"]
         node2 = cluster.nodes["node2"]
 
-        debug("starting repair...")
+        logger.debug("starting repair...")
         opts = ["-local"]
         opts += _repair_options(self.cluster.version(), ks="ks")
         node1.repair(opts)
 
         # Verify that only nodes in dc1 are involved in repair
         out_of_sync_logs = node1.grep_log("{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex()))
-        self.assertEqual(len(out_of_sync_logs), 1, "Lines matching: {}".format(len(out_of_sync_logs)))
+        assert len(out_of_sync_logs) == 1, "Lines matching: {}".format(len(out_of_sync_logs))
 
         line, m = out_of_sync_logs[0]
         num_out_of_sync_ranges, out_of_sync_nodes = m.group(3), {m.group(1), m.group(2)}
 
-        self.assertEqual(int(num_out_of_sync_ranges), 1, "Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, line))
+        assert int(num_out_of_sync_ranges) == 1, "Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, num_out_of_sync_ranges)
         valid_out_of_sync_pairs = {node1.address(), node2.address()}
-        self.assertEqual(out_of_sync_nodes, valid_out_of_sync_pairs, "Unrelated node found in local repair: {}, expected {}".format(out_of_sync_nodes, valid_out_of_sync_pairs))
+        assert out_of_sync_nodes == valid_out_of_sync_pairs, "Unrelated node found in local repair: {}, expected {}".format(out_of_sync_nodes, valid_out_of_sync_pairs)
         # Check node2 now has the key
         self.check_rows_on_node(node2, 2001, found=[1000], restart=False)
 
-    def dc_repair_test(self):
+    def test_dc_repair(self):
         """
         * Set up a multi DC cluster
         * Perform a -dc repair on two dc's
@@ -636,26 +637,26 @@ class TestRepair(BaseRepairTest):
         node2 = cluster.nodes["node2"]
         node3 = cluster.nodes["node3"]
 
-        debug("starting repair...")
+        logger.debug("starting repair...")
         opts = ["-dc", "dc1", "-dc", "dc2"]
         opts += _repair_options(self.cluster.version(), ks="ks")
         node1.repair(opts)
 
         # Verify that only nodes in dc1 and dc2 are involved in repair
         out_of_sync_logs = node1.grep_log("{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex()))
-        self.assertEqual(len(out_of_sync_logs), 2, "Lines matching: " + str([elt[0] for elt in out_of_sync_logs]))
+        assert len(out_of_sync_logs) == 2, "Lines matching: " + str([elt[0] for elt in out_of_sync_logs])
         valid_out_of_sync_pairs = [{node1.address(), node2.address()},
                                    {node2.address(), node3.address()}]
 
         for line, m in out_of_sync_logs:
             num_out_of_sync_ranges, out_of_sync_nodes = m.group(3), {m.group(1), m.group(2)}
-            self.assertEqual(int(num_out_of_sync_ranges), 1, "Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, line))
-            self.assertIn(out_of_sync_nodes, valid_out_of_sync_pairs, str(out_of_sync_nodes))
+            assert int(num_out_of_sync_ranges) == 1, "Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, num_out_of_sync_ranges)
+            assert out_of_sync_nodes in valid_out_of_sync_pairs, str(out_of_sync_nodes)
 
         # Check node2 now has the key
         self.check_rows_on_node(node2, 2001, found=[1000], restart=False)
 
-    def dc_parallel_repair_test(self):
+    def test_dc_parallel_repair(self):
         """
         * Set up a multi DC cluster
         * Perform a -dc repair on two dc's, with -dcpar
@@ -666,30 +667,30 @@ class TestRepair(BaseRepairTest):
         node2 = cluster.nodes["node2"]
         node3 = cluster.nodes["node3"]
 
-        debug("starting repair...")
+        logger.debug("starting repair...")
         opts = ["-dc", "dc1", "-dc", "dc2", "-dcpar"]
         opts += _repair_options(self.cluster.version(), ks="ks", sequential=False)
         node1.repair(opts)
 
         # Verify that only nodes in dc1 and dc2 are involved in repair
         out_of_sync_logs = node1.grep_log("{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex()))
-        self.assertEqual(len(out_of_sync_logs), 2, "Lines matching: " + str([elt[0] for elt in out_of_sync_logs]))
+        assert len(out_of_sync_logs) == 2, "Lines matching: " + str([elt[0] for elt in out_of_sync_logs])
         valid_out_of_sync_pairs = [{node1.address(), node2.address()},
                                    {node2.address(), node3.address()}]
 
         for line, m in out_of_sync_logs:
             num_out_of_sync_ranges, out_of_sync_nodes = m.group(3), {m.group(1), m.group(2)}
-            self.assertEqual(int(num_out_of_sync_ranges), 1, "Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, line))
-            self.assertIn(out_of_sync_nodes, valid_out_of_sync_pairs, str(out_of_sync_nodes))
+            assert int(num_out_of_sync_ranges) == 1, "Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, num_out_of_sync_ranges)
+            assert out_of_sync_nodes in valid_out_of_sync_pairs, str(out_of_sync_nodes)
 
         # Check node2 now has the key
         self.check_rows_on_node(node2, 2001, found=[1000], restart=False)
 
         # Check the repair was a dc parallel repair
         if self.cluster.version() >= '2.2':
-            self.assertEqual(len(node1.grep_log('parallelism: dc_parallel')), 1, str(node1.grep_log('parallelism')))
+            assert len(node1.grep_log('parallelism: dc_parallel')) == 1, str(node1.grep_log('parallelism'))
         else:
-            self.assertEqual(len(node1.grep_log('parallelism=PARALLEL')), 1, str(node1.grep_log('parallelism')))
+            assert len(node1.grep_log('parallelism=PARALLEL')) == 1, str(node1.grep_log('parallelism'))
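
For reference, the out_of_sync_logs handling in these tests relies on ccm's
grep_log returning (line, match) pairs, where groups 1 and 2 of the match
are the two node addresses and group 3 is the range count. A minimal sketch
of that parse against a canned log line (the address pattern is simplified
to \S+ here; the tests build it from cluster.address_regex()):

    import re

    pattern = re.compile(r'(\S+) and (\S+) have ([0-9]+) range\(s\) out of sync')
    line = '/127.0.0.1 and /127.0.0.2 have 1 range(s) out of sync for ks'
    m = pattern.search(line)
    assert int(m.group(3)) == 1
    assert {m.group(1), m.group(2)} == {'/127.0.0.1', '/127.0.0.2'}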
 
     def _setup_multi_dc(self):
         """
@@ -702,7 +703,7 @@ class TestRepair(BaseRepairTest):
         # interfere with the test (this must be after the populate)
         cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
         cluster.set_batch_commitlog(enabled=True)
-        debug("Starting cluster..")
+        logger.debug("Starting cluster..")
         # populate 2 nodes in dc1, and one node each in dc2 and dc3
         cluster.populate([2, 1, 1]).start(wait_for_binary_proto=True)
 
@@ -713,19 +714,19 @@ class TestRepair(BaseRepairTest):
         create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})
 
         # Insert 1000 keys, kill node 2, insert 1 key, restart node 2, insert 1000 more keys
-        debug("Inserting data...")
+        logger.debug("Inserting data...")
         insert_c1c2(session, n=1000, consistency=ConsistencyLevel.ALL)
         node2.flush()
         node2.stop(wait_other_notice=True)
         insert_c1c2(session, keys=(1000, ), consistency=ConsistencyLevel.THREE)
         node2.start(wait_for_binary_proto=True, wait_other_notice=True)
         node1.watch_log_for_alive(node2)
-        insert_c1c2(session, keys=range(1001, 2001), consistency=ConsistencyLevel.ALL)
+        insert_c1c2(session, keys=list(range(1001, 2001)), consistency=ConsistencyLevel.ALL)
 
         cluster.flush()
 
         # Verify that only node2 has only 2000 keys and others have 2001 keys
-        debug("Checking data...")
+        logger.debug("Checking data...")
         self.check_rows_on_node(node2, 2000, missings=[1000])
         for node in [node1, node3, node4]:
             self.check_rows_on_node(node, 2001, found=[1000])
@@ -739,7 +740,7 @@ class TestRepair(BaseRepairTest):
         Tests that multiple parallel repairs on the same table don't
         cause reference leaks.
         """
-        self.ignore_log_patterns = [
+        self.fixture_dtest_setup.ignore_log_patterns = [
             "Cannot start multiple repair sessions over the same sstables",  # The message we are expecting
             "Validation failed in",                                          # Expecting validation to fail
             "RMI Runtime",                                                   # JMX Repair failures
@@ -749,7 +750,7 @@ class TestRepair(BaseRepairTest):
         ]
 
         cluster = self.cluster
-        debug("Starting cluster..")
+        logger.debug("Starting cluster..")
         cluster.populate([3]).start(wait_for_binary_proto=True)
         node1, node2, node3 = cluster.nodelist()
         node1.stress(stress_options=['write', 'n=10k', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=3)', '-rate', 'threads=50'])
@@ -771,10 +772,10 @@ class TestRepair(BaseRepairTest):
             if len(node.grep_log("Cannot start multiple repair sessions over the same sstables")) > 0:
                 found_message = True
                 break
-        self.assertTrue(found_message)
+        assert found_message
 
-    @no_vnodes()
-    def token_range_repair_test(self):
+    @pytest.mark.no_vnodes
+    def test_token_range_repair(self):
         """
         Test repair using the -st and -et options
         * Launch a three node cluster
@@ -786,15 +787,15 @@ class TestRepair(BaseRepairTest):
         cluster = self.cluster
         cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
         cluster.set_batch_commitlog(enabled=True)
-        debug("Starting cluster..")
+        logger.debug("Starting cluster..")
         cluster.populate(3).start(wait_for_binary_proto=True)
 
         node1, node2, node3 = cluster.nodelist()
 
         self._parameterized_range_repair(repair_opts=['-st', str(node3.initial_token), '-et', str(node1.initial_token)])
 
-    @no_vnodes()
-    def token_range_repair_test_with_cf(self):
+    @pytest.mark.no_vnodes
+    def test_token_range_repair_with_cf(self):
         """
         @jira_ticket CASSANDRA-11866
 
@@ -810,13 +811,13 @@ class TestRepair(BaseRepairTest):
         cluster = self.cluster
         cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
         cluster.set_batch_commitlog(enabled=True)
-        debug("Starting cluster..")
+        logger.debug("Starting cluster..")
         cluster.populate(3).start(wait_for_binary_proto=True)
 
         node1, node2, node3 = cluster.nodelist()
 
         # Insert data, kill node 2, insert more data, restart node 2, insert another set of data
-        debug("Inserting data...")
+        logger.debug("Inserting data...")
         node1.stress(['write', 'n=1k', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30'])
         node2.flush()
         node2.stop(wait_other_notice=True)
@@ -829,25 +830,25 @@ class TestRepair(BaseRepairTest):
         opts = ['-st', str(node3.initial_token), '-et', str(node1.initial_token), ]
         opts += _repair_options(self.cluster.version(), ks='keyspace1', cf='counter1', sequential=False)
         node1.repair(opts)
-        self.assertEqual(len(node1.grep_log('are consistent for standard1')), 0, "Nodes 1 and 2 should not be consistent.")
-        self.assertEqual(len(node3.grep_log('Repair command')), 0, "Node 3 should not have been involved in the repair.")
+        assert len(node1.grep_log('are consistent for standard1')) == 0, "Nodes 1 and 2 should not be consistent."
+        assert len(node3.grep_log('Repair command')) == 0, "Node 3 should not have been involved in the repair."
         out_of_sync_logs = node1.grep_log("{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex()))
-        self.assertEqual(len(out_of_sync_logs), 0, "We repaired the wrong CF, so things should still be broke")
+        assert len(out_of_sync_logs) == 0, "We repaired the wrong CF == so things should still be broke"
 
         # Repair only the range node 1 owns on the right CF, assert everything is fixed
         opts = ['-st', str(node3.initial_token), '-et', str(node1.initial_token), ]
         opts += _repair_options(self.cluster.version(), ks='keyspace1', cf='standard1', sequential=False)
         node1.repair(opts)
-        self.assertEqual(len(node1.grep_log('are consistent for standard1')), 0, "Nodes 1 and 2 should not be consistent.")
-        self.assertEqual(len(node3.grep_log('Repair command')), 0, "Node 3 should not have been involved in the repair.")
+        assert len(node1.grep_log('are consistent for standard1')) == 0, "Nodes 1 and 2 should not be consistent."
+        assert len(node3.grep_log('Repair command')) == 0, "Node 3 should not have been involved in the repair."
         out_of_sync_logs = node1.grep_log("{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex()))
         _, matches = out_of_sync_logs[0]
         out_of_sync_nodes = {matches.group(1), matches.group(2)}
         valid_out_of_sync_pairs = [{node1.address(), node2.address()}]
-        self.assertIn(out_of_sync_nodes, valid_out_of_sync_pairs, str(out_of_sync_nodes))
+        assert out_of_sync_nodes in valid_out_of_sync_pairs, str(out_of_sync_nodes)
 
-    @no_vnodes()
-    def partitioner_range_repair_test(self):
+    @pytest.mark.no_vnodes
+    def test_partitioner_range_repair(self):
         """
         Test repair using the -pr option
         * Launch a three node cluster
@@ -859,7 +860,7 @@ class TestRepair(BaseRepairTest):
         cluster = self.cluster
         cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
         cluster.set_batch_commitlog(enabled=True)
-        debug("Starting cluster..")
+        logger.debug("Starting cluster..")
         cluster.populate(3).start(wait_for_binary_proto=True)
 
         node1, node2, node3 = cluster.nodelist()
@@ -867,8 +868,8 @@ class TestRepair(BaseRepairTest):
         self._parameterized_range_repair(repair_opts=['-pr'])
 
     @since('3.10')
-    @no_vnodes()
-    def pull_repair_test(self):
+    @pytest.mark.no_vnodes
+    def test_pull_repair(self):
         """
         Test repair using the --pull option
         @jira_ticket CASSANDRA-9876
@@ -883,7 +884,7 @@ class TestRepair(BaseRepairTest):
         cluster = self.cluster
         cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
         cluster.set_batch_commitlog(enabled=True)
-        debug("Starting cluster..")
+        logger.debug("Starting cluster..")
         cluster.populate(3).start(wait_for_binary_proto=True)
 
         node1, node2, node3 = cluster.nodelist()
@@ -894,14 +895,14 @@ class TestRepair(BaseRepairTest):
         self._parameterized_range_repair(repair_opts=['--pull', '--in-hosts', node1_address + ',' + node2_address, '-st', str(node3.initial_token), '-et', str(node1.initial_token)])
 
         # Node 1 should only receive files (as we ran a pull repair on node1)
-        self.assertTrue(len(node1.grep_log("Receiving [1-9][0-9]* files")) > 0)
-        self.assertEqual(len(node1.grep_log("sending [1-9][0-9]* files")), 0)
-        self.assertTrue(len(node1.grep_log("sending 0 files")) > 0)
+        assert len(node1.grep_log("Receiving [1-9][0-9]* files")) > 0
+        assert len(node1.grep_log("sending [1-9][0-9]* files")) == 0
+        assert len(node1.grep_log("sending 0 files")) > 0
 
         # Node 2 should only send files (as we ran a pull repair on node1)
-        self.assertEqual(len(node2.grep_log("Receiving [1-9][0-9]* files")), 0)
-        self.assertTrue(len(node2.grep_log("Receiving 0 files")) > 0)
-        self.assertTrue(len(node2.grep_log("sending [1-9][0-9]* files")) > 0)
+        assert len(node2.grep_log("Receiving [1-9][0-9]* files")) == 0
+        assert len(node2.grep_log("Receiving 0 files")) > 0
+        assert len(node2.grep_log("sending [1-9][0-9]* files")) > 0
 
     def _parameterized_range_repair(self, repair_opts):
         """
@@ -916,7 +917,7 @@ class TestRepair(BaseRepairTest):
         node1, node2, node3 = cluster.nodelist()
 
         # Insert data, kill node 2, insert more data, restart node 2, insert another set of data
-        debug("Inserting data...")
+        logger.debug("Inserting data...")
         node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30'])
 
         node2.flush()
@@ -934,8 +935,8 @@ class TestRepair(BaseRepairTest):
         opts += _repair_options(self.cluster.version(), ks='keyspace1', cf='standard1', sequential=False)
         node1.repair(opts)
 
-        self.assertEqual(len(node1.grep_log('are consistent for standard1')), 0, "Nodes 1 and 2 should not be consistent.")
-        self.assertEqual(len(node3.grep_log('Repair command')), 0, "Node 3 should not have been involved in the repair.")
+        assert len(node1.grep_log('are consistent for standard1')) == 0, "Nodes 1 and 2 should not be consistent."
+        assert len(node3.grep_log('Repair command')) == 0, "Node 3 should not have been involved in the repair."
 
         out_of_sync_logs = node1.grep_log("{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex()))
         _, matches = out_of_sync_logs[0]
@@ -943,10 +944,10 @@ class TestRepair(BaseRepairTest):
 
         valid_out_of_sync_pairs = [{node1.address(), node2.address()}]
 
-        self.assertIn(out_of_sync_nodes, valid_out_of_sync_pairs, str(out_of_sync_nodes))
+        assert out_of_sync_nodes in valid_out_of_sync_pairs, str(out_of_sync_nodes)
 
     @since('2.2')
-    def trace_repair_test(self):
+    def test_trace_repair(self):
         """
         * Launch a three node cluster
         * Insert some data at RF 2
@@ -957,12 +958,12 @@ class TestRepair(BaseRepairTest):
         cluster = self.cluster
         cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
         cluster.set_batch_commitlog(enabled=True)
-        debug("Starting cluster..")
+        logger.debug("Starting cluster..")
         cluster.populate(3).start(wait_for_binary_proto=True)
 
         node1, node2, node3 = cluster.nodelist()
 
-        debug("Inserting data...")
+        logger.debug("Inserting data...")
         node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30'])
 
         node2.flush()
@@ -984,12 +985,11 @@ class TestRepair(BaseRepairTest):
         rows = list(session.execute("SELECT activity FROM system_traces.events"))
         # This check assumes that the only (or at least first) thing to write to `system_traces.events.activity` is
         # the repair task triggered in the test.
-        self.assertIn('job threads: {}'.format(job_thread_count),
-                      rows[0][0],
-                      'Expected {} job threads in repair options. Instead we saw {}'.format(job_thread_count, rows[0][0]))
+        assert 'job threads: {}'.format(job_thread_count) in rows[0][0], \
+            'Expected {} job threads in repair options. Instead we saw {}'.format(job_thread_count, rows[0][0])
 
     @since('2.2')
-    def thread_count_repair_test(self):
+    def test_thread_count_repair(self):
         """
         * Launch a three node cluster
         * Insert some data at RF 2
@@ -1001,14 +1001,14 @@ class TestRepair(BaseRepairTest):
         cluster = self.cluster
         cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
         cluster.set_batch_commitlog(enabled=True)
-        debug("Starting cluster..")
+        logger.debug("Starting cluster..")
         cluster.populate(3).start(wait_for_binary_proto=True)
 
         node1, node2, node3 = cluster.nodelist()
 
         # Valid job thread counts: 1, 2, 3, and 4
         for job_thread_count in range(1, 5):
-            debug("Inserting data...")
+            logger.debug("Inserting data...")
             node1.stress(['write', 'n=2K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate',
                           'threads=30', '-pop', 'seq={}..{}K'.format(2 * (job_thread_count - 1), 2 * job_thread_count)])
 
@@ -1032,11 +1032,10 @@ class TestRepair(BaseRepairTest):
             rows = list(session.execute("SELECT activity FROM system_traces.events"))
             # This check assumes that the only (or at least first) thing to write to `system_traces.events.activity` is
             # the repair task triggered in the test.
-            self.assertIn('job threads: {}'.format(job_thread_count),
-                          rows[0][0],
-                          'Expected {} job threads in repair options. Instead we saw {}'.format(job_thread_count, rows[0][0]))
+            assert 'job threads: {}'.format(job_thread_count) in rows[0][0], \
+                'Expected {} job threads in repair options. Instead we saw {}'.format(job_thread_count, rows[0][0])
 
-    @no_vnodes()
+    @pytest.mark.no_vnodes
     def test_multiple_concurrent_repairs(self):
         """
         @jira_ticket CASSANDRA-11451
@@ -1061,7 +1060,7 @@ class TestRepair(BaseRepairTest):
         node1.stop(wait_other_notice=True)
         node3.stop(wait_other_notice=True)
         _, _, rc = node2.stress(['read', 'n=1M', 'no-warmup', '-rate', 'threads=30'], whitelist=True)
-        self.assertEqual(rc, 0)
+        assert rc == 0
 
     @since('4.0')
     def test_wide_row_repair(self):
@@ -1075,7 +1074,7 @@ class TestRepair(BaseRepairTest):
         node1, node2 = cluster.nodelist()
         node2.stop(wait_other_notice=True)
         profile_path = os.path.join(os.getcwd(), 'stress_profiles/repair_wide_rows.yaml')
-        print("yaml = " + profile_path)
+        logger.info(("yaml = " + profile_path))
         node1.stress(['user', 'profile=' + profile_path, 'n=50', 'ops(insert=1)', 'no-warmup', '-rate', 'threads=8',
                       '-insert', 'visits=FIXED(100K)', 'revisit=FIXED(100K)'])
         node2.start(wait_for_binary_proto=True)
@@ -1101,12 +1100,12 @@ class TestRepair(BaseRepairTest):
             node1.watch_log_for('requesting merkle trees', filename='system.log')
             time.sleep(2)
 
-        debug("stopping node1")
+        logger.debug("stopping node1")
         node1.stop(gently=False, wait_other_notice=True)
         t1.join()
-        debug("starting node1 - first repair should have failed")
+        logger.debug("starting node1 - first repair should have failed")
         node1.start(wait_for_binary_proto=True, wait_other_notice=True)
-        debug("running second repair")
+        logger.debug("running second repair")
         if cluster.version() >= "2.2":
             node1.repair()
         else:
@@ -1126,7 +1125,7 @@ class TestRepair(BaseRepairTest):
         """
         self._test_failure_during_repair(phase='sync', initiator=False,)
 
-    @since('2.2', '4')
+    @since('2.2', max_version='4')
     def test_failure_during_anticompaction(self):
         """
         @jira_ticket CASSANDRA-12901
@@ -1144,48 +1143,49 @@ class TestRepair(BaseRepairTest):
         cluster = self.cluster
         # We are not interested in specific errors, but
         # that the repair session finishes on node failure without hanging
-        self.ignore_log_patterns = [
+        self.fixture_dtest_setup.ignore_log_patterns = [
             "Endpoint .* died",
             "Streaming error occurred",
             "StreamReceiveTask",
             "Stream failed",
             "Session completed with the following error",
             "Repair session .* for range .* failed with error",
-            "Sync failed between .* and .*"
+            "Sync failed between .* and .*",
+            "failed to send a stream message/file to peer"
         ]
 
         # Disable hinted handoff and set batch commit log so this doesn't
         # interfere with the test (this must be after the populate)
         cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
         cluster.set_batch_commitlog(enabled=True)
-        debug("Setting up cluster..")
+        logger.debug("Setting up cluster..")
         cluster.populate(3)
         node1, node2, node3 = cluster.nodelist()
 
         node_to_kill = node2 if (phase == 'sync' and initiator) else node3
-        debug("Setting up byteman on {}".format(node_to_kill.name))
+        logger.debug("Setting up byteman on {}".format(node_to_kill.name))
         # set up byteman
         node_to_kill.byteman_port = '8100'
         node_to_kill.import_config_files()
 
-        debug("Starting cluster..")
+        logger.debug("Starting cluster..")
         cluster.start(wait_other_notice=True)
 
-        debug("stopping node3")
+        logger.debug("stopping node3")
         node3.stop(gently=False, wait_other_notice=True)
 
         self.patient_exclusive_cql_connection(node1)
-        debug("inserting data while node3 is down")
+        logger.debug("inserting data while node3 is down")
         node1.stress(stress_options=['write', 'n=1k',
                                      'no-warmup', 'cl=ONE',
                                      '-schema', 'replication(factor=3)',
                                      '-rate', 'threads=10'])
 
-        debug("bring back node3")
+        logger.debug("bring back node3")
         node3.start(wait_other_notice=True, wait_for_binary_proto=True)
 
         script = 'stream_sleep.btm' if phase == 'sync' else 'repair_{}_sleep.btm'.format(phase)
-        debug("Submitting byteman script to {}".format(node_to_kill.name))
+        logger.debug("Submitting byteman script to {}".format(node_to_kill.name))
         # Sleep on anticompaction/stream so there will be time for node to be killed
         node_to_kill.byteman_submit(['./byteman/{}'.format(script)])
 
@@ -1193,15 +1193,15 @@ class TestRepair(BaseRepairTest):
             global nodetool_error
             try:
                 node1.nodetool('repair keyspace1 standard1')
-            except Exception, e:
+            except Exception as e:
                 nodetool_error = e
 
-        debug("repair node1")
+        logger.debug("repair node1")
         # Launch in an external thread so it does not hang the process
         t = Thread(target=node1_repair)
         t.start()
 
-        debug("Will kill {} in middle of {}".format(node_to_kill.name, phase))
+        logger.debug("Will kill {} in middle of {}".format(node_to_kill.name, phase))
         msg_to_wait = 'streaming plan for Repair'
         if phase == 'anticompaction':
             msg_to_wait = 'Got anticompaction request'
@@ -1210,10 +1210,10 @@ class TestRepair(BaseRepairTest):
         node_to_kill.watch_log_for(msg_to_wait, filename='debug.log')
         node_to_kill.stop(gently=False, wait_other_notice=True)
 
-        debug("Killed {}, now waiting repair to finish".format(node_to_kill.name))
+        logger.debug("Killed {}, now waiting repair to finish".format(node_to_kill.name))
         t.join(timeout=60)
-        self.assertFalse(t.isAlive(), 'Repair still running after sync {} was killed'
-                                      .format("initiator" if initiator else "participant"))
+        assert not t.is_alive(), 'Repair still running after sync {} was killed'\
+            .format("initiator" if initiator else "participant")
 
         if cluster.version() < '4.0' or phase != 'sync':
             # the log entry we're watching for in the sync task came from the
@@ -1227,7 +1227,7 @@ RepairTableContents = namedtuple('RepairTableContents',
 
 
 @since('2.2')
-@attr("resource-intensive")
+@pytest.mark.resource_intensive
 class TestRepairDataSystemTable(Tester):
     """
     @jira_ticket CASSANDRA-5839
@@ -1237,21 +1237,19 @@ class TestRepairDataSystemTable(Tester):
     to a cluster, then ensuring these tables are in valid states before and
     after running repair.
     """
-
-    def setUp(self):
+    @pytest.fixture(scope='function', autouse=True)
+    def fixture_set_cluster_settings(self, fixture_dtest_setup):
         """
         Prepares a cluster for tests of the repair history tables by starting
         a 5-node cluster, then inserting 5000 values with RF=3.
         """
-
-        Tester.setUp(self)
-        self.cluster.populate(5).start(wait_for_binary_proto=True)
+        fixture_dtest_setup.cluster.populate(5).start(wait_for_binary_proto=True)
         self.node1 = self.cluster.nodelist()[0]
-        self.session = self.patient_cql_connection(self.node1)
+        self.session = fixture_dtest_setup.patient_cql_connection(self.node1)
 
         self.node1.stress(stress_options=['write', 'n=5K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=3)'])
 
-        self.cluster.flush()
+        fixture_dtest_setup.cluster.flush()
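
This setUp-to-fixture conversion is the general pattern used throughout the
commit; a minimal sketch of its shape (the class and fixture names here are
illustrative only, built on the dtest Tester base class and the framework's
fixture_dtest_setup fixture):

    import pytest
    from dtest import Tester

    class TestExample(Tester):

        @pytest.fixture(scope='function', autouse=True)
        def fixture_setup(self, fixture_dtest_setup):
            # Runs before every test method, replacing unittest's setUp().
            fixture_dtest_setup.cluster.populate(3).start()
            self.node1 = fixture_dtest_setup.cluster.nodelist()[0]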
 
     def repair_table_contents(self, node, include_system_keyspaces=True):
         """
@@ -1281,15 +1279,15 @@ class TestRepairDataSystemTable(Tester):
         return RepairTableContents(parent_repair_history=parent_repair_history,
                                    repair_history=repair_history)
 
-    @skip('hangs CI')
-    def initial_empty_repair_tables_test(self):
-        debug('repair tables:')
-        debug(self.repair_table_contents(node=self.node1, include_system_keyspaces=False))
+    @pytest.mark.skip(reason='hangs CI')
+    def test_initial_empty_repair_tables(self):
+        logger.debug('repair tables:')
+        logger.debug(self.repair_table_contents(node=self.node1, include_system_keyspaces=False))
         repair_tables_dict = self.repair_table_contents(node=self.node1, include_system_keyspaces=False)._asdict()
-        for table_name, table_contents in repair_tables_dict.items():
-            self.assertFalse(table_contents, '{} is non-empty'.format(table_name))
+        for table_name, table_contents in list(repair_tables_dict.items()):
+            assert not table_contents, '{} is non-empty'.format(table_name)
 
-    def repair_parent_table_test(self):
+    def test_repair_parent_table(self):
         """
         Test that `system_distributed.parent_repair_history` is properly populated
         after repair by:
@@ -1299,9 +1297,9 @@ class TestRepairDataSystemTable(Tester):
         """
         self.node1.repair()
         parent_repair_history, _ = self.repair_table_contents(node=self.node1, include_system_keyspaces=False)
-        self.assertTrue(len(parent_repair_history))
+        assert len(parent_repair_history)
 
-    def repair_table_test(self):
+    def test_repair_table(self):
         """
         Test that `system_distributed.repair_history` is properly populated
         after repair by:
@@ -1311,4 +1309,4 @@ class TestRepairDataSystemTable(Tester):
         """
         self.node1.repair()
         _, repair_history = self.repair_table_contents(node=self.node1, include_system_keyspaces=False)
-        self.assertTrue(len(repair_history))
+        assert len(repair_history)
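
Before the replace_address_test.py hunks below, note the purely mechanical
Python 3 conversions that recur throughout this commit; a short runnable
sketch of the recurring substitutions:

    xs = ['a', '', 'b']
    assert [x for x in xs if x] == ['a', 'b']   # replaces filter(None, xs)
    assert list(range(3)) == [0, 1, 2]          # replaces xrange(3)
    try:
        raise ValueError('boom')
    except Exception as e:                      # replaces "except Exception, e:"
        assert 'boom' in repr(e)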

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/replace_address_test.py
----------------------------------------------------------------------
diff --git a/replace_address_test.py b/replace_address_test.py
index 2b60077..31c1394 100644
--- a/replace_address_test.py
+++ b/replace_address_test.py
@@ -1,18 +1,24 @@
 import os
 import tempfile
+import pytest
+import logging
+import time
+
+from flaky import flaky
+
 from itertools import chain
 from shutil import rmtree
-from unittest import skipIf
 
 from cassandra import ConsistencyLevel, ReadTimeout, Unavailable
 from cassandra.query import SimpleStatement
 from ccmlib.node import Node
-from nose.plugins.attrib import attr
 
-from dtest import CASSANDRA_VERSION_FROM_BUILD, DISABLE_VNODES, Tester, debug
+from dtest import CASSANDRA_VERSION_FROM_BUILD, Tester
 from tools.assertions import assert_bootstrap_state, assert_all, assert_not_running
 from tools.data import rows_to_list
-from tools.decorators import since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 class NodeUnavailable(Exception):
@@ -20,24 +26,26 @@ class NodeUnavailable(Exception):
 
 
 class BaseReplaceAddressTest(Tester):
-    __test__ = False
-    replacement_node = None
-    ignore_log_patterns = (
-        # This one occurs when trying to send the migration to a
-        # node that hasn't started yet, and when it does, it gets
-        # replayed and everything is fine.
-        r'Can\'t send migration request: node.*is down',
-        r'Migration task failed to complete',  # 10978
-        # ignore streaming error during bootstrap
-        r'Streaming error occurred',
-        r'failed stream session'
-    )
+
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        fixture_dtest_setup.ignore_log_patterns = (
+            # This one occurs when trying to send the migration to a
+            # node that hasn't started yet, and when it does, it gets
+            # replayed and everything is fine.
+            r'Can\'t send migration request: node.*is down',
+            r'Migration task failed to complete',  # 10978
+            # ignore streaming error during bootstrap
+            r'Streaming error occurred',
+            r'failed stream session',
+            r'Failed to properly handshake with peer'
+        )
 
     def _setup(self, n=3, opts=None, enable_byteman=False, mixed_versions=False):
-        debug("Starting cluster with {} nodes.".format(n))
-        self.cluster.populate(n, use_vnodes=not DISABLE_VNODES)
+        logger.debug("Starting cluster with {} nodes.".format(n))
+        self.cluster.populate(n)
         if opts is not None:
-            debug("Setting cluster options: {}".format(opts))
+            logger.debug("Setting cluster options: {}".format(opts))
             self.cluster.set_configuration_options(opts)
 
         self.cluster.set_batch_commitlog(enabled=True)
@@ -46,7 +54,7 @@ class BaseReplaceAddressTest(Tester):
 
         self.cluster.seeds.remove(self.replaced_node)
         NUM_TOKENS = os.environ.get('NUM_TOKENS', '256')
-        if DISABLE_VNODES:
+        if not self.dtest_config.use_vnodes:
             self.cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': 1})
         else:
             self.cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': NUM_TOKENS})
@@ -57,7 +65,7 @@ class BaseReplaceAddressTest(Tester):
             self.query_node.import_config_files()
 
         if mixed_versions:
-            debug("Starting nodes on version 2.2.4")
+            logger.debug("Starting nodes on version 2.2.4")
             self.cluster.set_install_dir(version="2.2.4")
 
         self.cluster.start()
@@ -83,12 +91,12 @@ class BaseReplaceAddressTest(Tester):
                 replacement_address = self.replaced_node.address()
                 self.cluster.remove(self.replaced_node)
 
-            debug("Starting replacement node {} with jvm_option '{}={}'".format(replacement_address, jvm_option, replace_address))
+            logger.debug("Starting replacement node {} with jvm_option '{}={}'".format(replacement_address, jvm_option, replace_address))
             self.replacement_node = Node('replacement', cluster=self.cluster, auto_bootstrap=True,
                                          thrift_interface=None, storage_interface=(replacement_address, 7000),
                                          jmx_port='7400', remote_debug_port='0', initial_token=None, binary_interface=(replacement_address, 9042))
             if opts is not None:
-                debug("Setting options on replacement node: {}".format(opts))
+                logger.debug("Setting options on replacement node: {}".format(opts))
                 self.replacement_node.set_configuration_options(opts)
             self.cluster.add(self.replacement_node, False, data_center=data_center)
 
@@ -107,39 +115,41 @@ class BaseReplaceAddressTest(Tester):
 
     def _stop_node_to_replace(self, gently=False, table='keyspace1.standard1', cl=ConsistencyLevel.THREE):
         if self.replaced_node.is_running():
-            debug("Stopping {}".format(self.replaced_node.name))
+            logger.debug("Stopping {}".format(self.replaced_node.name))
             self.replaced_node.stop(gently=gently, wait_other_notice=True)
 
-        debug("Testing node stoppage (query should fail).")
-        with self.assertRaises((Unavailable, ReadTimeout)):
+        logger.debug("Testing node stoppage (query should fail).")
+        with pytest.raises((Unavailable, ReadTimeout)):
             session = self.patient_cql_connection(self.query_node)
             query = SimpleStatement('select * from {}'.format(table), consistency_level=cl)
             session.execute(query)
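
pytest.raises accepts a tuple of exception types, mirroring unittest's
assertRaises; a minimal standalone sketch:

    import pytest

    def flaky_read():
        raise TimeoutError('no replica responded')

    with pytest.raises((TimeoutError, ConnectionError)):
        flaky_read()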
 
     def _insert_data(self, n='1k', rf=3, whitelist=False):
-        debug("Inserting {} entries with rf={} with stress...".format(n, rf))
-        self.query_node.stress(['write', 'n={}'.format(n), 'no-warmup', '-schema', 'replication(factor={})'.format(rf)],
+        logger.debug("Inserting {} entries with rf={} with stress...".format(n, rf))
+        self.query_node.stress(['write', 'n={}'.format(n), 'no-warmup', '-schema', 'replication(factor={})'.format(rf),
+                                '-rate', 'threads=10'],
                                whitelist=whitelist)
         self.cluster.flush()
+        time.sleep(20)
 
     def _fetch_initial_data(self, table='keyspace1.standard1', cl=ConsistencyLevel.THREE, limit=10000):
-        debug("Fetching initial data from {} on {} with CL={} and LIMIT={}".format(table, self.query_node.name, cl, limit))
+        logger.debug("Fetching initial data from {} on {} with CL={} and LIMIT={}".format(table, self.query_node.name, cl, limit))
         session = self.patient_cql_connection(self.query_node)
         query = SimpleStatement('select * from {} LIMIT {}'.format(table, limit), consistency_level=cl)
-        return rows_to_list(session.execute(query))
+        return rows_to_list(session.execute(query, timeout=20))
 
     def _verify_data(self, initial_data, table='keyspace1.standard1', cl=ConsistencyLevel.ONE, limit=10000,
                      restart_nodes=False):
-        self.assertGreater(len(initial_data), 0, "Initial data must be greater than 0")
+        assert len(initial_data) > 0, "Initial data must be greater than 0"
 
         # query should work again
-        debug("Stopping old nodes")
+        logger.debug("Stopping old nodes")
         for node in self.cluster.nodelist():
             if node.is_running() and node != self.replacement_node:
-                debug("Stopping {}".format(node.name))
+                logger.debug("Stopping {}".format(node.name))
                 node.stop(gently=False, wait_other_notice=True)
 
-        debug("Verifying {} on {} with CL={} and LIMIT={}".format(table, self.replacement_node.address(), cl, limit))
+        logger.debug("Verifying {} on {} with CL={} and LIMIT={}".format(table, self.replacement_node.address(), cl, limit))
         session = self.patient_exclusive_cql_connection(self.replacement_node)
         assert_all(session, 'select * from {} LIMIT {}'.format(table, limit),
                    expected=initial_data,
@@ -166,22 +176,22 @@ class BaseReplaceAddressTest(Tester):
                                    timeout=60)
 
     def _verify_tokens_migrated_successfully(self, previous_log_size=None):
-        if DISABLE_VNODES:
+        if not self.dtest_config.use_vnodes:
             num_tokens = 1
         else:
             # a little hacky but grep_log returns the whole line...
             num_tokens = int(self.replacement_node.get_conf_option('num_tokens'))
 
-        debug("Verifying {} tokens migrated sucessfully".format(num_tokens))
+        logger.debug("Verifying {} tokens migrated sucessfully".format(num_tokens))
         logs = self.replacement_node.grep_log(r"Token (.*?) changing ownership from /{} to /{}"
                                               .format(self.replaced_node.address(),
                                                       self.replacement_node.address()))
         if (previous_log_size is not None):
-            self.assertEquals(len(logs), previous_log_size)
+            assert len(logs) == previous_log_size
 
         moved_tokens = set([l[1].group(1) for l in logs])
-        debug("number of moved tokens: {}".format(len(moved_tokens)))
-        self.assertEquals(len(moved_tokens), num_tokens)
+        logger.debug("number of moved tokens: {}".format(len(moved_tokens)))
+        assert len(moved_tokens) == num_tokens
 
         return len(logs)
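
grep_log on a ccm node returns pairs whose second element is the regex match
(hence the l[1].group(1) indexing above). A small self-contained sketch of the
same parsing over made-up log lines:

    import re

    log_lines = [
        "Token 42 changing ownership from /127.0.0.3 to /127.0.0.4",
        "Token 99 changing ownership from /127.0.0.3 to /127.0.0.4",
    ]
    pattern = re.compile(r"Token (.*?) changing ownership from /(\S+) to /(\S+)")

    # Mirror grep_log's (line, match) tuples, then dedupe tokens with a set,
    # as _verify_tokens_migrated_successfully does.
    logs = [(line, pattern.search(line)) for line in log_lines]
    moved_tokens = {m.group(1) for _, m in logs if m is not None}
    assert moved_tokens == {"42", "99"}
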
 
@@ -197,11 +207,11 @@ class BaseReplaceAddressTest(Tester):
         self._stop_node_to_replace()
 
         if mixed_versions:
-            debug("Upgrading all except {} to current version".format(self.query_node.address()))
+            logger.debug("Upgrading all except {} to current version".format(self.query_node.address()))
             self.cluster.set_install_dir(install_dir=default_install_dir)
             for node in self.cluster.nodelist():
                 if node.is_running() and node != self.query_node:
-                    debug("Upgrading {} to current version".format(node.address()))
+                    logger.debug("Upgrading {} to current version".format(node.address()))
                     node.stop(gently=True, wait_other_notice=True)
                     node.start(wait_other_notice=True, wait_for_binary_proto=True)
 
@@ -216,7 +226,7 @@ class BaseReplaceAddressTest(Tester):
         if not same_address and not mixed_versions:
             initial_data = self._fetch_initial_data(cl=ConsistencyLevel.TWO)
 
-        debug("Joining replaced node")
+        logger.debug("Joining replaced node")
         self.replacement_node.nodetool("join")
 
         if not same_address:
@@ -229,33 +239,32 @@ class BaseReplaceAddressTest(Tester):
 
 
 class TestReplaceAddress(BaseReplaceAddressTest):
-    __test__ = True
 
-    @attr('resource-intensive')
-    def replace_stopped_node_test(self):
+    @pytest.mark.resource_intensive
+    def test_replace_stopped_node(self):
         """
         Test that we can replace a node that is not shutdown gracefully.
         """
         self._test_replace_node(gently=False)
 
-    @attr('resource-intensive')
-    def replace_shutdown_node_test(self):
+    @pytest.mark.resource_intensive
+    def test_replace_shutdown_node(self):
         """
         @jira_ticket CASSANDRA-9871
         Test that we can replace a node that is shutdown gracefully.
         """
         self._test_replace_node(gently=True)
 
-    @attr('resource-intensive')
-    def replace_stopped_node_same_address_test(self):
+    @pytest.mark.resource_intensive
+    def test_replace_stopped_node_same_address(self):
         """
         @jira_ticket CASSANDRA-8523
         Test that we can replace a node with the same address correctly
         """
         self._test_replace_node(gently=False, same_address=True)
 
-    @attr('resource-intensive')
-    def replace_first_boot_test(self):
+    @pytest.mark.resource_intensive
+    def test_replace_first_boot(self):
         self._test_replace_node(jvm_option='replace_address_first_boot')
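
The nose @attr('resource-intensive') tags become first-class pytest markers.
Assuming the suite registers them in its pytest configuration, a sketch of the
registration and of selecting or excluding them from the command line (file
name and marker descriptions here are illustrative):

    # pytest.ini (sketch; the real dtest config may differ)
    [pytest]
    markers =
        resource_intensive: tests that need a large or multi-node cluster
        vnodes: tests that require virtual nodes to be enabled
        upgrade_test: cross-version upgrade tests

    # then, from the shell:
    #   pytest -m resource_intensive          # run only the heavy tests
    #   pytest -m "not resource_intensive"    # skip them on small machines
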
 
     def _test_replace_node(self, gently=False, jvm_option='replace_address', same_address=False):
@@ -288,32 +297,35 @@ class TestReplaceAddress(BaseReplaceAddressTest):
 
         self._verify_data(initial_data)
 
-    @attr('resource-intensive')
-    def replace_active_node_test(self):
-        self.ignore_log_patterns = list(self.ignore_log_patterns) + [r'Exception encountered during startup']
+    @pytest.mark.resource_intensive
+    def test_replace_active_node(self):
+        self.fixture_dtest_setup.ignore_log_patterns = list(self.fixture_dtest_setup.ignore_log_patterns) + [
+            r'Exception encountered during startup']
+
         self._setup(n=3)
         self._do_replace(wait_for_binary_proto=False)
 
-        debug("Waiting for replace to fail")
+        logger.debug("Waiting for replace to fail")
         self.replacement_node.watch_log_for("java.lang.UnsupportedOperationException: Cannot replace a live node...")
         assert_not_running(self.replacement_node)
 
-    @attr('resource-intensive')
-    def replace_nonexistent_node_test(self):
-        self.ignore_log_patterns = list(self.ignore_log_patterns) + [
+    @pytest.mark.resource_intensive
+    def test_replace_nonexistent_node(self):
+        self.fixture_dtest_setup.ignore_log_patterns = list(self.fixture_dtest_setup.ignore_log_patterns) + [
             # This is caused by starting a node improperly (replacing active/nonexistent)
             r'Exception encountered during startup',
             # This is caused by trying to replace a nonexistent node
             r'Exception in thread Thread']
+
         self._setup(n=3)
         self._do_replace(replace_address='127.0.0.5', wait_for_binary_proto=False)
 
-        debug("Waiting for replace to fail")
+        logger.debug("Waiting for replace to fail")
         self.replacement_node.watch_log_for("java.lang.RuntimeException: Cannot replace_address /127.0.0.5 because it doesn't exist in gossip")
         assert_not_running(self.replacement_node)
 
     @since('3.6')
-    def fail_without_replace_test(self):
+    def test_fail_without_replace(self):
         """
         When starting a node from a clean slate with the same address as
         an existing down node, the node should error out even when
@@ -321,14 +333,16 @@ class TestReplaceAddress(BaseReplaceAddressTest):
         to use replace_address.
         @jira_ticket CASSANDRA-10134
         """
-        self.ignore_log_patterns = list(self.ignore_log_patterns) + [r'Exception encountered during startup']
+        self.fixture_dtest_setup.ignore_log_patterns = list(self.fixture_dtest_setup.ignore_log_patterns) + [
+            r'Exception encountered during startup']
+
         self._setup(n=3)
         self._insert_data()
         node1, node2, node3 = self.cluster.nodelist()
 
         mark = None
         for auto_bootstrap in (True, False):
-            debug("Stopping node 3.")
+            logger.debug("Stopping node 3.")
             node3.stop(gently=False)
 
             # completely delete the data, commitlog, and saved caches
@@ -339,13 +353,13 @@ class TestReplaceAddress(BaseReplaceAddressTest):
                     rmtree(d)
 
             node3.set_configuration_options(values={'auto_bootstrap': auto_bootstrap})
-            debug("Starting node 3 with auto_bootstrap = {val}".format(val=auto_bootstrap))
+            logger.debug("Starting node 3 with auto_bootstrap = {val}".format(val=auto_bootstrap))
             node3.start(wait_other_notice=False)
             node3.watch_log_for('Use cassandra.replace_address if you want to replace this node', from_mark=mark, timeout=20)
             mark = node3.mark_log()
 
     @since('3.6')
-    def unsafe_replace_test(self):
+    def test_unsafe_replace(self):
         """
         To handle situations such as failed disk in a JBOD, it may be desirable to
         replace a node without bootstrapping. In such scenarios best practice
@@ -359,14 +373,16 @@ class TestReplaceAddress(BaseReplaceAddressTest):
 
         @jira_ticket CASSANDRA-10134
         """
-        self.ignore_log_patterns = list(self.ignore_log_patterns) + [r'Exception encountered during startup']
+        self.fixture_dtest_setup.ignore_log_patterns = list(self.fixture_dtest_setup.ignore_log_patterns) + [
+            r'Exception encountered during startup']
+
         self._setup(n=3)
         self._insert_data()
         initial_data = self._fetch_initial_data()
         self.replacement_node = self.replaced_node
 
         for set_allow_unsafe_flag in [False, True]:
-            debug("Stopping {}".format(self.replaced_node.name))
+            logger.debug("Stopping {}".format(self.replaced_node.name))
             self.replaced_node.stop(gently=False)
 
             # completely delete the system keyspace data plus commitlog and saved caches
@@ -384,27 +400,27 @@ class TestReplaceAddress(BaseReplaceAddressTest):
             mark = self.replacement_node.mark_log()
 
             if set_allow_unsafe_flag:
-                debug('Starting replacement node with auto_bootstrap = false and replace_address = {} and allow_unsafe_replace = true'.format(self.replaced_node.address()))
+                logger.debug('Starting replacement node with auto_bootstrap = false and replace_address = {} and allow_unsafe_replace = true'.format(self.replaced_node.address()))
                 self._do_replace(extra_jvm_args=['-Dcassandra.allow_unsafe_replace=true'])
                 self._verify_data(initial_data)
             else:
-                debug('Starting replacement node with auto_bootstrap = false and replace_address = {}'.format(self.replaced_node.address()))
+                logger.debug('Starting replacement node with auto_bootstrap = false and replace_address = {}'.format(self.replaced_node.address()))
                 self._do_replace(wait_for_binary_proto=False)
                 self.replacement_node.watch_log_for('To perform this operation, please restart with -Dcassandra.allow_unsafe_replace=true',
                                                     from_mark=mark, timeout=20)
 
-    @skipIf(CASSANDRA_VERSION_FROM_BUILD == '3.9', "Test doesn't run on 3.9")
+    @pytest.mark.skipif(CASSANDRA_VERSION_FROM_BUILD == '3.9', reason="Test doesn't run on 3.9")
     @since('2.2')
-    def insert_data_during_replace_same_address_test(self):
+    def test_insert_data_during_replace_same_address(self):
         """
         Test that replacement node with same address DOES NOT receive writes during replacement
         @jira_ticket CASSANDRA-8523
         """
         self._test_insert_data_during_replace(same_address=True)
 
-    @skipIf(CASSANDRA_VERSION_FROM_BUILD == '3.9', "Test doesn't run on 3.9")
+    @pytest.mark.skipif(CASSANDRA_VERSION_FROM_BUILD == '3.9', reason="Test doesn't run on 3.9")
     @since('2.2')
-    def insert_data_during_replace_different_address_test(self):
+    def test_insert_data_during_replace_different_address(self):
         """
         Test that replacement node with different address DOES receive writes during replacement
         @jira_ticket CASSANDRA-8523
@@ -412,8 +428,8 @@ class TestReplaceAddress(BaseReplaceAddressTest):
         self._test_insert_data_during_replace(same_address=False)
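
The nose @skipIf decorator becomes @pytest.mark.skipif with a required reason.
The condition is evaluated once at import time, which suits module-level
constants such as CASSANDRA_VERSION_FROM_BUILD. A stand-alone sketch:

    import pytest

    CASSANDRA_VERSION_FROM_BUILD = '3.9'   # stand-in for the real build constant

    @pytest.mark.skipif(CASSANDRA_VERSION_FROM_BUILD == '3.9',
                        reason="Test doesn't run on 3.9")
    def test_insert_during_replace():
        assert True   # never executed while the condition above holds
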
 
     @since('2.2')
-    @attr('resource-intensive')
-    def resume_failed_replace_test(self):
+    @pytest.mark.resource_intensive
+    def test_resume_failed_replace(self):
         """
         Test resumable bootstrap while replacing node. Feature introduced in
         2.2 with ticket https://issues.apache.org/jira/browse/CASSANDRA-8838
@@ -423,21 +439,23 @@ class TestReplaceAddress(BaseReplaceAddressTest):
         self._test_restart_failed_replace(mode='resume')
 
     @since('2.2')
-    @attr('resource-intensive')
-    def restart_failed_replace_with_reset_resume_state_test(self):
+    @pytest.mark.resource_intensive
+    def test_restart_failed_replace_with_reset_resume_state(self):
         """Test replace with resetting bootstrap progress"""
         self._test_restart_failed_replace(mode='reset_resume_state')
 
     @since('2.2')
-    @attr('resource-intensive')
-    def restart_failed_replace_test(self):
+    @pytest.mark.resource_intensive
+    def test_restart_failed_replace(self):
         """
         Test that if a node fails to replace, it can join the cluster even if the data is wiped.
         """
         self._test_restart_failed_replace(mode='wipe')
 
     def _test_restart_failed_replace(self, mode):
-        self.ignore_log_patterns = list(self.ignore_log_patterns) + [r'Error while waiting on bootstrap to complete']
+        self.fixture_dtest_setup.ignore_log_patterns = list(self.fixture_dtest_setup.ignore_log_patterns) + [
+            r'Error while waiting on bootstrap to complete']
+
         self._setup(n=3, enable_byteman=True)
         self._insert_data(n="1k")
 
@@ -445,7 +463,7 @@ class TestReplaceAddress(BaseReplaceAddressTest):
 
         self._stop_node_to_replace()
 
-        debug("Submitting byteman script to make stream fail")
+        logger.debug("Submitting byteman script to make stream fail")
 
         if self.cluster.version() < '4.0':
             self.query_node.byteman_submit(['./byteman/pre4.0/stream_failure.btm'])
@@ -460,7 +478,7 @@ class TestReplaceAddress(BaseReplaceAddressTest):
 
         if mode == 'reset_resume_state':
             mark = self.replacement_node.mark_log()
-            debug("Restarting replacement node with -Dcassandra.reset_bootstrap_progress=true")
+            logger.debug("Restarting replacement node with -Dcassandra.reset_bootstrap_progress=true")
             # restart replacement node with resetting bootstrap state
             self.replacement_node.stop()
             self.replacement_node.start(jvm_args=[
@@ -471,7 +489,7 @@ class TestReplaceAddress(BaseReplaceAddressTest):
             # check if we reset bootstrap state
             self.replacement_node.watch_log_for("Resetting bootstrap progress to start fresh", from_mark=mark)
         elif mode == 'resume':
-            debug("Resuming failed bootstrap")
+            logger.debug("Resuming failed bootstrap")
             self.replacement_node.nodetool('bootstrap resume')
             # check if we skipped already retrieved ranges
             self.replacement_node.watch_log_for("already available. Skipping streaming.")
@@ -479,37 +497,39 @@ class TestReplaceAddress(BaseReplaceAddressTest):
         elif mode == 'wipe':
             self.replacement_node.stop()
 
-            debug("Waiting other nodes to detect node stopped")
-            self.query_node.watch_log_for("FatClient /{} has been silent for 30000ms, removing from gossip".format(self.replacement_node.address()), timeout=60)
-            self.query_node.watch_log_for("Node /{} failed during replace.".format(self.replacement_node.address()), timeout=60, filename='debug.log')
+            logger.debug("Waiting other nodes to detect node stopped")
+            self.query_node.watch_log_for("FatClient /{} has been silent for 30000ms, removing from gossip".format(self.replacement_node.address()), timeout=120)
+            self.query_node.watch_log_for("Node /{} failed during replace.".format(self.replacement_node.address()), timeout=120, filename='debug.log')
 
-            debug("Restarting node after wiping data")
+            logger.debug("Restarting node after wiping data")
             self._cleanup(self.replacement_node)
             self.replacement_node.start(jvm_args=["-Dcassandra.replace_address_first_boot={}".format(self.replaced_node.address())],
                                         wait_for_binary_proto=True)
         else:
-            raise RuntimeError('invalid mode value {mode}'.format(mode))
+            raise RuntimeError('invalid mode value {mode}'.format(mode=mode))
 
         # check if bootstrap succeeded
         assert_bootstrap_state(self, self.replacement_node, 'COMPLETED')
 
-        debug("Bootstrap finished successully, verifying data.")
+        logger.debug("Bootstrap finished successully, verifying data.")
 
         self._verify_data(initial_data)
 
-    def replace_with_insufficient_replicas_test(self):
+    def test_replace_with_insufficient_replicas(self):
         """
         Test that replace fails when there are insufficient replicas
         @jira_ticket CASSANDRA-11848
         """
-        self.ignore_log_patterns = list(self.ignore_log_patterns) + [r'Unable to find sufficient sources for streaming range']
+        self.fixture_dtest_setup.ignore_log_patterns = list(self.fixture_dtest_setup.ignore_log_patterns) + [
+            r'Unable to find sufficient sources for streaming range']
+
         self._setup(n=3)
         self._insert_data(rf=2)
 
         self._stop_node_to_replace()
 
         # stop other replica
-        debug("Stopping other replica")
+        logger.debug("Stopping other replica")
         self.query_node.stop(wait_other_notice=True)
 
         self._do_replace(wait_for_binary_proto=False, wait_other_notice=False)
@@ -518,7 +538,9 @@ class TestReplaceAddress(BaseReplaceAddressTest):
         self.replacement_node.watch_log_for("Unable to find sufficient sources for streaming range")
         assert_not_running(self.replacement_node)
 
-    def multi_dc_replace_with_rf1_test(self):
+    @flaky
+    @pytest.mark.vnodes
+    def test_multi_dc_replace_with_rf1(self):
         """
         Test that multi-dc replace works when rf=1 on each dc
         """
@@ -550,7 +572,11 @@ class TestReplaceAddress(BaseReplaceAddressTest):
             stress_config.write(yaml_config)
             stress_config.flush()
             self.query_node.stress(['user', 'profile=' + stress_config.name, 'n=10k', 'no-warmup',
-                                    'ops(insert=1)', '-rate', 'threads=50'])
+                                    'ops(insert=1)', '-rate', 'threads=5'])
+            # Sleep for a bit to try and let things catch up, as we frequently see a
+            # lot of GC after the stress invocation above, which can cause the next
+            # step of the test to time out before the cluster is fully caught up.
+            time.sleep(30)
 
         # Save initial data
         table_name = 'keyspace1.users'
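
The fixed time.sleep(30) above is a blunt settle period. A hedged sketch of a
polling alternative that retries a cheap read until the cluster answers at the
required consistency level (helper name and timings are illustrative, not part
of this change):

    import time

    from cassandra import ConsistencyLevel, ReadTimeout, Unavailable
    from cassandra.query import SimpleStatement

    def wait_until_settled(session, table, cl, timeout=60, interval=5):
        """Poll a LIMIT 1 read until it succeeds at the desired CL instead of
        sleeping for a fixed worst-case duration."""
        query = SimpleStatement('SELECT * FROM {} LIMIT 1'.format(table),
                                consistency_level=cl)
        deadline = time.time() + timeout
        while True:
            try:
                session.execute(query, timeout=10)
                return
            except (Unavailable, ReadTimeout):
                if time.time() > deadline:
                    raise
                time.sleep(interval)

    # e.g. wait_until_settled(session, 'keyspace1.users',
    #                         ConsistencyLevel.LOCAL_ONE)
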
@@ -563,13 +589,13 @@ class TestReplaceAddress(BaseReplaceAddressTest):
         assert_bootstrap_state(self, self.replacement_node, 'COMPLETED')
 
         # Check that keyspace was replicated from dc1 to dc2
-        self.assertFalse(self.replacement_node.grep_log("Unable to find sufficient sources for streaming range"))
+        assert not self.replacement_node.grep_log("Unable to find sufficient sources for streaming range")
 
         self._verify_data(initial_data, table=table_name, cl=ConsistencyLevel.LOCAL_ONE)
 
     def _cleanup(self, node):
         commitlog_dir = os.path.join(node.get_path(), 'commitlogs')
         for data_dir in node.data_directories():
-            debug("Deleting {}".format(data_dir))
+            logger.debug("Deleting {}".format(data_dir))
             rmtree(data_dir)
         rmtree(commitlog_dir)




[04/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/upgrade_tests/cql_tests.py
----------------------------------------------------------------------
diff --git a/upgrade_tests/cql_tests.py b/upgrade_tests/cql_tests.py
index 267d2a4..f08a141 100644
--- a/upgrade_tests/cql_tests.py
+++ b/upgrade_tests/cql_tests.py
@@ -1,13 +1,13 @@
-# coding: utf-8
-
 import itertools
 import math
 import random
 import struct
 import time
+import pytest
+import logging
+
 from collections import OrderedDict
 from distutils.version import LooseVersion
-from unittest import skip, skipUnless
 from uuid import UUID, uuid4
 
 from cassandra import ConsistencyLevel, InvalidRequest
@@ -15,28 +15,29 @@ from cassandra.concurrent import execute_concurrent_with_args
 from cassandra.protocol import ProtocolException, SyntaxException
 from cassandra.query import SimpleStatement
 from cassandra.util import sortedset
-from nose.exc import SkipTest
-from nose.tools import assert_not_in
 
-from dtest import RUN_STATIC_UPGRADE_MATRIX, debug
-from thrift_bindings.v22.ttypes import \
+from dtest import RUN_STATIC_UPGRADE_MATRIX
+from thrift_bindings.thrift010.ttypes import \
     ConsistencyLevel as ThriftConsistencyLevel
-from thrift_bindings.v22.ttypes import (CfDef, Column, ColumnDef,
+from thrift_bindings.thrift010.ttypes import (CfDef, Column, ColumnDef,
                                         ColumnOrSuperColumn, ColumnParent,
                                         Deletion, Mutation, SlicePredicate,
                                         SliceRange)
-from thrift_tests import get_thrift_client
+from thrift_test import get_thrift_client
 from tools.assertions import (assert_all, assert_invalid, assert_length_equal,
                               assert_none, assert_one, assert_row_count)
 from tools.data import rows_to_list
-from tools.decorators import since
-from upgrade_base import UpgradeTester
-from upgrade_manifest import build_upgrade_pairs
+from .upgrade_base import UpgradeTester
+from .upgrade_manifest import build_upgrade_pairs
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
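
since is now just an alias for pytest.mark.since, i.e. a custom marker; a
conftest hook presumably reads it and skips tests that need a newer Cassandra
than the one under test. A hedged sketch of such a hook (assumes pytest >= 3.6
for get_closest_marker; the real dtest plumbing may differ):

    # conftest.py (sketch)
    import pytest
    from distutils.version import LooseVersion

    CLUSTER_VERSION = LooseVersion('3.0.15')   # stand-in for the detected version

    def pytest_collection_modifyitems(items):
        for item in items:
            marker = item.get_closest_marker('since')
            if marker and LooseVersion(marker.args[0]) > CLUSTER_VERSION:
                item.add_marker(pytest.mark.skip(
                    reason='requires Cassandra >= {}'.format(marker.args[0])))
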
 
 
+@pytest.mark.upgrade_test
 class TestCQL(UpgradeTester):
 
-    def static_cf_test(self):
+    def test_static_cf(self):
         """ Test static CF syntax """
         cursor = self.prepare()
 
@@ -51,7 +52,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE users")
 
             # Inserts
@@ -79,19 +80,18 @@ class TestCQL(UpgradeTester):
             assert_all(cursor, "SELECT * FROM users", [[UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479'), 37, None, None], [UUID('550e8400-e29b-41d4-a716-446655440000'), 36, None, None]])
 
     @since('2.0', max_version='3')  # 3.0+ not compatible with protocol version 2
-    def large_collection_errors_test(self):
+    def test_large_collection_errors(self):
         """ For large collections, make sure that we are printing warnings """
-
         for version in self.get_node_versions():
             if version >= '3.0':
-                raise SkipTest('version {} not compatible with protocol version 2'.format(version))
+                pytest.skip('version {} not compatible with protocol version 2'.format(version))
 
         # We only warn with protocol 2
         cursor = self.prepare(protocol_version=2)
 
         cluster = self.cluster
         node1 = cluster.nodelist()[0]
-        self.ignore_log_patterns = ["Detected collection for table"]
+        self.fixture_dtest_setup.ignore_log_patterns = ["Detected collection for table"]
 
         cursor.execute("""
             CREATE TABLE maps (
@@ -101,7 +101,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE maps")
 
             # Insert more than the max, which is 65535
@@ -114,7 +114,7 @@ class TestCQL(UpgradeTester):
                                 "Only the first 65535 elements will be returned to the client. "
                                 "Please see http://cassandra.apache.org/doc/cql3/CQL.html#collections for more details.")
 
-    def noncomposite_static_cf_test(self):
+    def test_noncomposite_static_cf(self):
         """ Test non-composite static CF syntax """
         cursor = self.prepare()
 
@@ -129,7 +129,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE users")
 
             # Inserts
@@ -162,7 +162,7 @@ class TestCQL(UpgradeTester):
             assert_all(cursor, "SELECT * FROM users", [[UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479'), 37, None, None],
                                                        [UUID('550e8400-e29b-41d4-a716-446655440000'), 36, None, None]])
 
-    def dynamic_cf_test(self):
+    def test_dynamic_cf(self):
         """ Test non-composite dynamic CF syntax """
         cursor = self.prepare()
 
@@ -176,7 +176,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE clicks")
 
             # Inserts
@@ -199,7 +199,7 @@ class TestCQL(UpgradeTester):
             # Check we don't allow empty values for url since this is the full underlying cell name (#6152)
             assert_invalid(cursor, "INSERT INTO clicks (userid, url, time) VALUES (810e8500-e29b-41d4-a716-446655440000, '', 42)")
 
-    def dense_cf_test(self):
+    def test_dense_cf(self):
         """ Test composite 'dense' CF syntax """
         cursor = self.prepare()
 
@@ -214,7 +214,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE connections")
 
             # Inserts
@@ -257,7 +257,7 @@ class TestCQL(UpgradeTester):
             cursor.execute("DELETE FROM connections WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND ip = '192.168.0.3'")
             assert_none(cursor, "SELECT * FROM connections WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND ip = '192.168.0.3'")
 
-    def sparse_cf_test(self):
+    def test_sparse_cf(self):
         """ Test composite 'sparse' CF syntax """
         cursor = self.prepare()
 
@@ -273,7 +273,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE timeline")
 
             frodo_id = UUID('550e8400-e29b-41d4-a716-446655440000')
@@ -297,7 +297,7 @@ class TestCQL(UpgradeTester):
                 [24, 'Something something', 'Frodo Baggins'],
                 [30, 'Yet one more message', None]])
 
-    def limit_ranges_test(self):
+    def test_limit_ranges(self):
         """ Validate LIMIT option for 'range queries' in SELECT statements """
         cursor = self.prepare(ordered=True)
 
@@ -311,11 +311,11 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE clicks")
 
             # Inserts
-            for id in xrange(0, 100):
+            for id in range(0, 100):
                 for tld in ['com', 'org', 'net']:
                     cursor.execute("INSERT INTO clicks (userid, url, time) VALUES ({}, 'http://foo.{}', 42)".format(id, tld))
 
@@ -324,7 +324,7 @@ class TestCQL(UpgradeTester):
 
             assert_one(cursor, "SELECT * FROM clicks WHERE token(userid) > token(2) LIMIT 1", [3, 'http://foo.com', 42])
 
-    def limit_multiget_test(self):
+    def test_limit_multiget(self):
         """ Validate LIMIT option for 'multiget' in SELECT statements """
         cursor = self.prepare()
 
@@ -338,11 +338,11 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE clicks")
 
             # Inserts
-            for id in xrange(0, 100):
+            for id in range(0, 100):
                 for tld in ['com', 'org', 'net']:
                     cursor.execute("INSERT INTO clicks (userid, url, time) VALUES ({}, 'http://foo.{}', 42)".format(id, tld))
 
@@ -357,7 +357,7 @@ class TestCQL(UpgradeTester):
                 # the coordinator is the non-upgraded 2.1 node
                 assert_one(cursor, "SELECT * FROM clicks WHERE userid IN (48, 2) LIMIT 1", [48, 'http://foo.com', 42])
 
-    def simple_tuple_query_test(self):
+    def test_simple_tuple_query(self):
         """
         @jira_ticket CASSANDRA-8613
         """
@@ -366,7 +366,7 @@ class TestCQL(UpgradeTester):
         cursor.execute("create table bard (a int, b int, c int, d int , e int, PRIMARY KEY (a, b, c, d, e))")
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE bard")
 
             cursor.execute("""INSERT INTO bard (a, b, c, d, e) VALUES (0, 2, 0, 0, 0);""")
@@ -379,7 +379,7 @@ class TestCQL(UpgradeTester):
 
             assert_all(cursor, "SELECT * FROM bard WHERE b=0 AND (c, d, e) > (1, 1, 1) ALLOW FILTERING;", [[0, 0, 2, 2, 2], [0, 0, 3, 3, 3]])
 
-    def limit_sparse_test(self):
+    def test_limit_sparse(self):
         """ Validate LIMIT option for sparse table in SELECT statements """
         cursor = self.prepare()
 
@@ -395,11 +395,11 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE clicks")
 
             # Inserts
-            for id in xrange(0, 100):
+            for id in range(0, 100):
                 for tld in ['com', 'org', 'net']:
                     cursor.execute("INSERT INTO clicks (userid, url, day, month, year) VALUES ({}, 'http://foo.{}', 1, 'jan', 2012)".format(id, tld))
 
@@ -408,7 +408,7 @@ class TestCQL(UpgradeTester):
             res = list(cursor.execute("SELECT * FROM clicks LIMIT 4"))
             assert_length_equal(res, 4)
 
-    def counters_test(self):
+    def test_counters(self):
         """ Validate counter support """
         cursor = self.prepare()
 
@@ -422,7 +422,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE clicks")
 
             cursor.execute("UPDATE clicks SET total = total + 1 WHERE userid = 1 AND url = 'http://foo.com'")
@@ -438,7 +438,7 @@ class TestCQL(UpgradeTester):
             cursor.execute("UPDATE clicks SET total = total -2 WHERE userid = 1 AND url = 'http://foo.com'")
             assert_one(cursor, "SELECT total FROM clicks WHERE userid = 1 AND url = 'http://foo.com'", [-4])
 
-    def indexed_with_eq_test(self):
+    def test_indexed_with_eq(self):
         """ Check that you can query for an indexed column even with a key EQ clause """
         cursor = self.prepare()
 
@@ -455,7 +455,7 @@ class TestCQL(UpgradeTester):
         cursor.execute("CREATE INDEX byAge ON users(age)")
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE users")
 
             # Inserts
@@ -467,7 +467,7 @@ class TestCQL(UpgradeTester):
 
             assert_one(cursor, "SELECT firstname FROM users WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND age = 33", ['Samwise'])
 
-    def select_key_in_test(self):
+    def test_select_key_in(self):
         """ Query for KEY IN (...) """
         cursor = self.prepare()
 
@@ -482,7 +482,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE users")
 
             # Inserts
@@ -503,7 +503,7 @@ class TestCQL(UpgradeTester):
 
             assert_length_equal(res, 2)
 
-    def exclusive_slice_test(self):
+    def test_exclusive_slice(self):
         """ Test SELECT respects inclusive and exclusive bounds """
         cursor = self.prepare()
 
@@ -517,7 +517,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             # Inserts
@@ -540,7 +540,7 @@ class TestCQL(UpgradeTester):
 
             assert_all(cursor, "SELECT v FROM test WHERE k = 0 AND c >= 2 AND c < 6 ORDER BY c DESC LIMIT 2", [[5], [4]])
 
-    def in_clause_wide_rows_test(self):
+    def test_in_clause_wide_rows(self):
         """ Check IN support for 'wide rows' in SELECT statement """
         cursor = self.prepare()
 
@@ -565,7 +565,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test1")
             cursor.execute("TRUNCATE test2")
 
@@ -589,7 +589,7 @@ class TestCQL(UpgradeTester):
 
                 assert_all(cursor, "SELECT v FROM test2 WHERE k = 0 AND c1 = 0 AND c2 IN (5, 2, 8)", [[2], [5], [8]])
 
-    def order_by_test(self):
+    def test_order_by(self):
         """ Check ORDER BY support in SELECT statement """
         cursor = self.prepare()
 
@@ -614,7 +614,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test1")
             cursor.execute("TRUNCATE test2")
 
@@ -622,7 +622,7 @@ class TestCQL(UpgradeTester):
             for x in range(0, 10):
                 cursor.execute("INSERT INTO test1 (k, c, v) VALUES (0, {}, {})".format(x, x))
 
-            assert_all(cursor, "SELECT v FROM test1 WHERE k = 0 ORDER BY c DESC", [[x] for x in reversed(range(10))])
+            assert_all(cursor, "SELECT v FROM test1 WHERE k = 0 ORDER BY c DESC", [[x] for x in reversed(list(range(10)))])
 
             # Inserts
             for x in range(0, 4):
@@ -634,11 +634,11 @@ class TestCQL(UpgradeTester):
             assert_invalid(cursor, "SELECT v FROM test2 WHERE k = 0 ORDER BY c2 DESC")
             assert_invalid(cursor, "SELECT v FROM test2 WHERE k = 0 ORDER BY k DESC")
 
-            assert_all(cursor, "SELECT v FROM test2 WHERE k = 0 ORDER BY c1 DESC", [[x] for x in reversed(range(8))])
+            assert_all(cursor, "SELECT v FROM test2 WHERE k = 0 ORDER BY c1 DESC", [[x] for x in reversed(list(range(8)))])
 
             assert_all(cursor, "SELECT v FROM test2 WHERE k = 0 ORDER BY c1", [[x] for x in range(8)])
 
-    def more_order_by_test(self):
+    def test_more_order_by(self):
         """
         More ORDER BY checks
         @jira_ticket CASSANDRA-4160
@@ -665,7 +665,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             cursor.execute("INSERT INTO Test (row, number, string) VALUES ('row', 1, 'one');")
@@ -705,7 +705,7 @@ class TestCQL(UpgradeTester):
 
             assert_all(cursor, "SELECT number, number2 FROM test2 WHERE row='a' AND number <= 3 ORDER BY number DESC;", [[3, 1], [3, 0], [2, 1], [2, 0], [1, 0]])
 
-    def order_by_validation_test(self):
+    def test_order_by_validation(self):
         """
         Check we don't allow order by on row key
         @jira_ticket CASSANDRA-4246
@@ -722,7 +722,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             q = "INSERT INTO test (k1, k2, v) VALUES (%d, %d, %d)"
@@ -732,7 +732,7 @@ class TestCQL(UpgradeTester):
 
             assert_invalid(cursor, "SELECT * FROM test ORDER BY k2")
 
-    def order_by_with_in_test(self):
+    def test_order_by_with_in(self):
         """
         Check that order-by works with IN
         @jira_ticket CASSANDRA-4327
@@ -748,7 +748,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
             cursor.default_fetch_size = None
 
@@ -766,7 +766,7 @@ class TestCQL(UpgradeTester):
             query = "SELECT my_id, col1 FROM test WHERE my_id in('key1', 'key2', 'key3') ORDER BY col1"
             assert_all(cursor, query, [['key1', 1], ['key3', 2], ['key2', 3]])
 
-    def reversed_comparator_test(self):
+    def test_reversed_comparator(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -789,7 +789,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
             cursor.execute("TRUNCATE test2")
 
@@ -817,7 +817,7 @@ class TestCQL(UpgradeTester):
 
             assert_invalid(cursor, "SELECT c1, c2, v FROM test2 WHERE k = 0 ORDER BY c2 DESC, c1 ASC")
 
-    def null_support_test(self):
+    def test_null_support(self):
         """ Test support for nulls """
         cursor = self.prepare()
 
@@ -832,7 +832,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             # Inserts
@@ -850,7 +850,7 @@ class TestCQL(UpgradeTester):
             assert_invalid(cursor, "SELECT * FROM test WHERE k = null")
             assert_invalid(cursor, "INSERT INTO test (k, c, v2) VALUES (0, 0, { 'foo', 'bar', null })")
 
-    def nameless_index_test(self):
+    def test_nameless_index(self):
         """ Test CREATE INDEX without name and validate the index can be dropped """
         cursor = self.prepare()
 
@@ -864,7 +864,7 @@ class TestCQL(UpgradeTester):
         cursor.execute("CREATE INDEX on users(birth_year)")
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE users")
 
             cursor.execute("INSERT INTO users (id, birth_year) VALUES ('Tom', 42)")
@@ -873,12 +873,11 @@ class TestCQL(UpgradeTester):
 
             assert_all(cursor, "SELECT id FROM users WHERE birth_year = 42", [['Tom'], ['Bob']])
 
-    def deletion_test(self):
+    def test_deletion(self):
         """
         Test simple deletion and in particular check for #4193 bug
         @jira_ticket CASSANDRA-4193
         """
-
         cursor = self.prepare()
 
         cursor.execute("""
@@ -903,7 +902,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE testcf")
             cursor.execute("TRUNCATE testcf2")
 
@@ -931,7 +930,7 @@ class TestCQL(UpgradeTester):
 
             assert_all(cursor, "SELECT * FROM testcf", [list(row2)])
 
-    def count_test(self):
+    def test_count(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -945,7 +944,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE events")
 
             full = "INSERT INTO events (kind, time, value1, value2) VALUES ('ev1', %d, %d, %d)"
@@ -962,7 +961,7 @@ class TestCQL(UpgradeTester):
 
             assert_all(cursor, "SELECT COUNT(1) FROM events WHERE kind IN ('ev1', 'ev2') AND time=0", [[2]])
 
-    def batch_test(self):
+    def test_batch(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -974,7 +973,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE users")
 
             query = SimpleStatement("""
@@ -987,7 +986,7 @@ class TestCQL(UpgradeTester):
             """, consistency_level=ConsistencyLevel.QUORUM)
             cursor.execute(query)
 
-    def token_range_test(self):
+    def test_token_range(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -999,7 +998,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             c = 100
@@ -1019,7 +1018,7 @@ class TestCQL(UpgradeTester):
             # cursor.execute("SELECT k FROM test WHERE token(k) >= 0")
             assert_all(cursor, "SELECT k FROM test WHERE token(k) >= token({}) AND token(k) < token({})".format(inOrder[32], inOrder[65]), [[inOrder[x]] for x in range(32, 65)])
 
-    def timestamp_and_ttl_test(self):
+    def test_timestamp_and_ttl(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -1031,7 +1030,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             cursor.execute("INSERT INTO test (k, c) VALUES (1, 'test')")
@@ -1042,11 +1041,11 @@ class TestCQL(UpgradeTester):
             assert_length_equal(res, 2)
 
             for r in res:
-                self.assertIsInstance(r[2], (int, long))
+                assert isinstance(r[2], int)
                 if r[0] == 1:
-                    self.assertIsNone(r[3], res)
+                    assert r[3] is None, res
                 else:
-                    self.assertIsInstance(r[3], (int, long))
+                    assert isinstance(r[3], int)
 
             # wrap writetime(), ttl() in other functions (test for CASSANDRA-8451)
             res = list(cursor.execute("SELECT k, c, blobAsBigint(bigintAsBlob(writetime(c))), ttl(c) FROM test"))
@@ -1054,29 +1053,29 @@ class TestCQL(UpgradeTester):
             assert_length_equal(res, 2)
 
             for r in res:
-                self.assertIsInstance(r[2], (int, long))
+                assert isinstance(r[2], int)
                 if r[0] == 1:
-                    self.assertIsNone(r[3], res)
+                    assert r[3] is None, res
                 else:
-                    self.assertIsInstance(r[3], (int, long))
+                    assert isinstance(r[3], int)
 
             res = list(cursor.execute("SELECT k, c, writetime(c), blobAsInt(intAsBlob(ttl(c))) FROM test"))
 
             assert_length_equal(res, 2)
 
             for r in res:
-                self.assertIsInstance(r[2], (int, long))
+                assert isinstance(r[2], (int, int))
                 if r[0] == 1:
-                    self.assertIsNone(r[3], res)
+                    assert r[3] is None, res
                 else:
-                    self.assertIsInstance(r[3], (int, long))
+                    assert isinstance(r[3], int)
 
             assert_invalid(cursor, "SELECT k, c, writetime(k) FROM test")
 
             res = cursor.execute("SELECT k, d, writetime(d) FROM test WHERE k = 1")
             assert_one(cursor, "SELECT k, d, writetime(d) FROM test WHERE k = 1", [1, None, None])
 
-    def no_range_ghost_test(self):
+    def test_no_range_ghost(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -1097,7 +1096,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
             cursor.execute("TRUNCATE ks1.users")
 
@@ -1124,7 +1123,7 @@ class TestCQL(UpgradeTester):
 
             assert_all(cursor, "SELECT * FROM users WHERE KEY='user1'", [])
 
-    def undefined_column_handling_test(self):
+    def test_undefined_column_handling(self):
         cursor = self.prepare(ordered=True)
 
         cursor.execute("""
@@ -1136,7 +1135,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             cursor.execute("INSERT INTO test (k, v1, v2) VALUES (0, 0, 0)")
@@ -1147,9 +1146,8 @@ class TestCQL(UpgradeTester):
 
             assert_all(cursor, "SELECT v2 FROM test WHERE k = 1", [[None]])
 
-    def range_tombstones_test(self):
+    def test_range_tombstones(self):
         """ Test deletion by 'composite prefix' (range tombstones) """
-
         # Uses 3 nodes just to make sure RowMutation are correctly serialized
         cursor = self.prepare(nodes=3)
 
@@ -1165,36 +1163,36 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test1")
 
             rows = 5
             col1 = 2
             col2 = 2
             cpr = col1 * col2
-            for i in xrange(0, rows):
-                for j in xrange(0, col1):
-                    for k in xrange(0, col2):
+            for i in range(0, rows):
+                for j in range(0, col1):
+                    for k in range(0, col2):
                         n = (i * cpr) + (j * col2) + k
                         cursor.execute("INSERT INTO test1 (k, c1, c2, v1, v2) VALUES ({}, {}, {}, {}, {})".format(i, j, k, n, n))
 
-            for i in xrange(0, rows):
+            for i in range(0, rows):
 
-                assert_all(cursor, "SELECT v1, v2 FROM test1 where k = %d" % i, [[x, x] for x in xrange(i * cpr, (i + 1) * cpr)])
+                assert_all(cursor, "SELECT v1, v2 FROM test1 where k = %d" % i, [[x, x] for x in range(i * cpr, (i + 1) * cpr)])
 
-            for i in xrange(0, rows):
+            for i in range(0, rows):
                 cursor.execute("DELETE FROM test1 WHERE k = %d AND c1 = 0" % i)
 
-            for i in xrange(0, rows):
-                assert_all(cursor, "SELECT v1, v2 FROM test1 WHERE k = %d" % i, [[x, x] for x in xrange(i * cpr + col1, (i + 1) * cpr)])
+            for i in range(0, rows):
+                assert_all(cursor, "SELECT v1, v2 FROM test1 WHERE k = %d" % i, [[x, x] for x in range(i * cpr + col1, (i + 1) * cpr)])
 
             self.cluster.flush()
             time.sleep(0.2)
 
-            for i in xrange(0, rows):
-                assert_all(cursor, "SELECT v1, v2 FROM test1 WHERE k = %d" % i, [[x, x] for x in xrange(i * cpr + col1, (i + 1) * cpr)])
+            for i in range(0, rows):
+                assert_all(cursor, "SELECT v1, v2 FROM test1 WHERE k = %d" % i, [[x, x] for x in range(i * cpr + col1, (i + 1) * cpr)])
 
-    def range_tombstones_compaction_test(self):
+    def test_range_tombstones_compaction(self):
         """ Test deletion by 'composite prefix' (range tombstones) with compaction """
         cursor = self.prepare()
 
@@ -1209,7 +1207,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test1")
 
             for c1 in range(0, 4):
@@ -1223,9 +1221,9 @@ class TestCQL(UpgradeTester):
             self.cluster.flush()
             self.cluster.compact()
 
-            assert_all(cursor, "SELECT v1 FROM test1 WHERE k = 0", [['{}{}'.format(c1, c2)] for c1 in xrange(0, 4) for c2 in xrange(0, 2) if c1 != 1])
+            assert_all(cursor, "SELECT v1 FROM test1 WHERE k = 0", [['{}{}'.format(c1, c2)] for c1 in range(0, 4) for c2 in range(0, 2) if c1 != 1])
 
-    def delete_row_test(self):
+    def test_delete_row(self):
         """ Test deletion of rows """
         cursor = self.prepare()
 
@@ -1241,7 +1239,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             q = "INSERT INTO test (k, c1, c2, v1, v2) VALUES (%d, %d, %d, %d, %d)"
@@ -1256,7 +1254,7 @@ class TestCQL(UpgradeTester):
 
             assert_length_equal(res, 3)
 
-    def range_query_2ndary_test(self):
+    def test_range_query_2ndary(self):
         """
         Test range queries with 2ndary indexes
         @jira_ticket CASSANDRA-4257
@@ -1267,7 +1265,7 @@ class TestCQL(UpgradeTester):
         cursor.execute("CREATE INDEX indextest_setid_idx ON indextest (setid)")
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE indextest")
 
             q = "INSERT INTO indextest (id, row, setid) VALUES (%d, %d, %d);"
@@ -1280,7 +1278,7 @@ class TestCQL(UpgradeTester):
 
             assert_all(cursor, "SELECT * FROM indextest WHERE setid = 0 AND row < 1 ALLOW FILTERING;", [[0, 0, 0]])
 
-    def set_test(self):
+    def test_set(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -1293,7 +1291,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE user")
 
             q = "UPDATE user SET %s WHERE fn='Tom' AND ln='Bombadil'"
@@ -1322,7 +1320,7 @@ class TestCQL(UpgradeTester):
             cursor.execute("DELETE tags FROM user WHERE fn='Bilbo' AND ln='Baggins'")
             assert_all(cursor, "SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'", [])
 
-    def map_test(self):
+    def test_map(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -1335,7 +1333,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE user")
 
             q = "UPDATE user SET %s WHERE fn='Tom' AND ln='Bombadil'"
@@ -1362,7 +1360,7 @@ class TestCQL(UpgradeTester):
             cursor.execute(q % "m = {}")
             assert_all(cursor, "SELECT m FROM user WHERE fn='Bilbo' AND ln='Baggins'", [])
 
-    def list_test(self):
+    def test_list(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -1375,7 +1373,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE user")
 
             q = "UPDATE user SET %s WHERE fn='Tom' AND ln='Bombadil'"
@@ -1402,7 +1400,7 @@ class TestCQL(UpgradeTester):
             cursor.execute(q % "tags = tags - [ 'bar' ]")
             assert_one(cursor, "SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'", [['m', 'n', 'c', 'c']])
 
-    def multi_collection_test(self):
+    def test_multi_collection(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -1415,7 +1413,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE foo")
 
             cursor.execute("UPDATE ks.foo SET L = [1, 3, 5] WHERE k = b017f48f-ae67-11e1-9096-005056c00008;")
@@ -1431,7 +1429,7 @@ class TestCQL(UpgradeTester):
                 sortedset([1, 3, 5, 7, 11, 13])
             ]])
 
-    def range_query_test(self):
+    def test_range_query(self):
         """
         Range test query from #4372
         @jira_ticket CASSANDRA-4372
@@ -1441,7 +1439,7 @@ class TestCQL(UpgradeTester):
         cursor.execute("CREATE TABLE test (a int, b int, c int, d int, e int, f text, PRIMARY KEY (a, b, c, d, e) )")
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             cursor.execute("INSERT INTO test (a, b, c, d, e, f) VALUES (1, 1, 1, 1, 2, '2');")
@@ -1450,9 +1448,9 @@ class TestCQL(UpgradeTester):
             cursor.execute("INSERT INTO test (a, b, c, d, e, f) VALUES (1, 1, 1, 1, 3, '3');")
             cursor.execute("INSERT INTO test (a, b, c, d, e, f) VALUES (1, 1, 1, 1, 5, '5');")
 
-            assert_all(cursor, "SELECT a, b, c, d, e, f FROM test WHERE a = 1 AND b = 1 AND c = 1 AND d = 1 AND e >= 2;", [[1, 1, 1, 1, 2, u'2'], [1, 1, 1, 1, 3, u'3'], [1, 1, 1, 1, 5, u'5']])
+            assert_all(cursor, "SELECT a, b, c, d, e, f FROM test WHERE a = 1 AND b = 1 AND c = 1 AND d = 1 AND e >= 2;", [[1, 1, 1, 1, 2, '2'], [1, 1, 1, 1, 3, '3'], [1, 1, 1, 1, 5, '5']])
 
-    def composite_row_key_test(self):
+    def test_composite_row_key(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -1466,7 +1464,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             req = "INSERT INTO test (k1, k2, c, v) VALUES ({}, {}, {}, {})"
@@ -1488,7 +1486,7 @@ class TestCQL(UpgradeTester):
             assert_all(cursor, "SELECT * FROM test WHERE token(k1, k2) > " + str(-((2 ** 63) - 1)), [[0, 2, 2, 2], [0, 3, 3, 3], [0, 0, 0, 0], [0, 1, 1, 1]])
 
     @since('2', max_version='4')
-    def cql3_insert_thrift_test(self):
+    def test_cql3_insert_thrift(self):
         """
         Check that we can insert from thrift into a CQL3 table
         @jira_ticket CASSANDRA-4377
@@ -1505,7 +1503,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             node = self.cluster.nodelist()[0]
@@ -1525,7 +1523,7 @@ class TestCQL(UpgradeTester):
             assert_one(cursor, "SELECT * FROM test", [2, 4, 8])
 
     @since('2', max_version='4')
-    def cql3_non_compound_range_tombstones_test(self):
+    def test_cql3_non_compound_range_tombstones(self):
         """
         Checks that 3.0 serializes RangeTombstoneLists correctly
         when communicating with 2.2 nodes.
@@ -1554,13 +1552,13 @@ class TestCQL(UpgradeTester):
         session.cluster.control_connection.wait_for_schema_agreement()
 
         for is_upgraded, session, node in self.do_upgrade(session, return_nodes=True):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
 
             upgrade_to_version = self.get_node_version(is_upgraded=True)
             if LooseVersion('3.0.0') <= upgrade_to_version <= LooseVersion('3.0.6'):
-                self.skip('CASSANDRA-11930 was fixed in 3.0.7 and 3.7')
+                pytest.skip(msg='CASSANDRA-11930 was fixed in 3.0.7 and 3.7')
             elif LooseVersion('3.1') <= upgrade_to_version <= LooseVersion('3.6'):
-                self.skip('CASSANDRA-11930 was fixed in 3.0.7 and 3.7')
+                pytest.skip(msg='CASSANDRA-11930 was fixed in 3.0.7 and 3.7')
 
             session.execute("TRUNCATE ks.cf")
 
@@ -1571,7 +1569,7 @@ class TestCQL(UpgradeTester):
 
             # insert a number of keys so that we'll get rows on both the old and upgraded nodes
             for key in ['key{}'.format(i) for i in range(10)]:
-                debug("Using key " + key)
+                logger.debug("Using key " + key)
 
                 # insert "static" column
                 client.batch_mutate(
@@ -1588,13 +1586,13 @@ class TestCQL(UpgradeTester):
                 # sanity check on the query
                 fetch_slice = SlicePredicate(slice_range=SliceRange('', '', False, 100))
                 row = client.get_slice(key, ColumnParent(column_family='cf'), fetch_slice, ThriftConsistencyLevel.ALL)
-                self.assertEqual(6, len(row), row)
+                assert 6 == len(row), row
                 cols = OrderedDict([(cosc.column.name, cosc.column.value) for cosc in row])
-                debug(cols)
-                self.assertEqual(['a', 'b', 'c', 'd', 'e', 'static1'], cols.keys())
-                self.assertEqual('val0', cols['a'])
-                self.assertEqual('val4', cols['e'])
-                self.assertEqual(struct.pack('>i', 1), cols['static1'])
+                logger.debug(cols)
+                assert ['a', 'b', 'c', 'd', 'e', 'static1'] == list(cols.keys())
+                assert 'val0' == cols['a']
+                assert 'val4' == cols['e']
+                assert struct.pack('>i', 1) == cols['static1']
 
                 # delete a slice of dynamic columns
                 slice_range = SliceRange('b', 'd', False, 100)
@@ -1604,15 +1602,15 @@ class TestCQL(UpgradeTester):
 
                 # check remaining columns
                 row = client.get_slice(key, ColumnParent(column_family='cf'), fetch_slice, ThriftConsistencyLevel.ALL)
-                self.assertEqual(3, len(row), row)
+                assert 3 == len(row), row
                 cols = OrderedDict([(cosc.column.name, cosc.column.value) for cosc in row])
-                debug(cols)
-                self.assertEqual(['a', 'e', 'static1'], cols.keys())
-                self.assertEqual('val0', cols['a'])
-                self.assertEqual('val4', cols['e'])
-                self.assertEqual(struct.pack('>i', 1), cols['static1'])
+                logger.debug(cols)
+                assert ['a', 'e', 'static1'] == list(cols.keys())
+                assert 'val0' == cols['a']
+                assert 'val4' == cols['e']
+                assert struct.pack('>i', 1) == cols['static1']
 
-    def row_existence_test(self):
+    def test_row_existence(self):
         """
         Check the semantic of CQL row existence (part of #4361)
         @jira_ticket CASSANDRA-4361
@@ -1630,7 +1628,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             cursor.execute("INSERT INTO test (k, c, v1, v2) VALUES (1, 1, 1, 1)")
@@ -1651,7 +1649,7 @@ class TestCQL(UpgradeTester):
             cursor.execute("INSERT INTO test (k, c) VALUES (2, 2)")
             assert_one(cursor, "SELECT * FROM test", [2, 2, None, None])
 
-    def only_pk_test(self):
+    def test_only_pk(self):
         """
         Check table with only a PK (part of #4361)
         @jira_ticket CASSANDRA-4361
@@ -1676,7 +1674,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
             cursor.execute("TRUNCATE test2")
 
@@ -1696,11 +1694,11 @@ class TestCQL(UpgradeTester):
             query = "SELECT * FROM test2"
             assert_all(cursor, query, [[x, y] for x in range(0, 2) for y in range(0, 2)])
 
-    def no_clustering_test(self):
+    def test_no_clustering(self):
         cursor = self.prepare()
         cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v int)")
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
 
             for i in range(10):
                 cursor.execute("INSERT INTO test (k, v) VALUES (%s, %s)", (i, i))
@@ -1709,7 +1707,7 @@ class TestCQL(UpgradeTester):
 
             assert_all(cursor, "SELECT * FROM test", [[i, i] for i in range(10)], ignore_order=True)
 
-    def date_test(self):
+    def test_date(self):
         """ Check dates are correctly recognized and validated """
         cursor = self.prepare()
 
@@ -1721,18 +1719,17 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             cursor.execute("INSERT INTO test (k, t) VALUES (0, '2011-02-03')")
             assert_invalid(cursor, "INSERT INTO test (k, t) VALUES (0, '2011-42-42')")
 
-    def range_slice_test(self):
+    def test_range_slice(self):
         """
         Test a regression from #1337
         @jira_ticket CASSANDRA-1337
         """
-
         cursor = self.prepare()
 
         cursor.execute("""
@@ -1743,7 +1740,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             cursor.execute("INSERT INTO test (k, v) VALUES ('foo', 0)")
@@ -1751,7 +1748,7 @@ class TestCQL(UpgradeTester):
 
             assert_row_count(cursor, 'test', 2)
 
-    def composite_index_with_pk_test(self):
+    def test_composite_index_with_pk(self):
 
         cursor = self.prepare(ordered=True)
         cursor.execute("""
@@ -1768,7 +1765,7 @@ class TestCQL(UpgradeTester):
         cursor.execute("CREATE INDEX ON blogs(author)")
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE blogs")
 
             req = "INSERT INTO blogs (blog_id, time1, time2, author, content) VALUES (%d, %d, %d, '%s', '%s')"
@@ -1814,12 +1811,11 @@ class TestCQL(UpgradeTester):
                 assert_invalid(cursor, "SELECT content FROM blogs WHERE time1 = 1 AND time2 = 1 AND author='foo'")
                 assert_invalid(cursor, "SELECT content FROM blogs WHERE time1 = 1 AND time2 > 0 AND author='foo'")
 
-    def limit_bugs_test(self):
+    def test_limit_bugs(self):
         """
         Test for LIMIT bugs from #4579
         @jira_ticket CASSANDRA-4579
         """
-
         cursor = self.prepare(ordered=True)
         cursor.execute("""
             CREATE TABLE testcf (
@@ -1841,7 +1837,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE testcf")
             cursor.execute("TRUNCATE testcf2")
 
@@ -1873,12 +1869,11 @@ class TestCQL(UpgradeTester):
 
             assert_all(cursor, "SELECT * FROM testcf2 LIMIT 5;", [[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]])
 
-    def npe_composite_table_slice_test(self):
+    def test_npe_composite_table_slice(self):
         """
         Test for NPE when trying to select a slice from a composite table
         @jira_ticket CASSANDRA-4532
         """
-
         cursor = self.prepare()
         cursor.execute("""
             CREATE TABLE compositetest(
@@ -1891,7 +1886,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE compositetest")
 
             cursor.execute("INSERT INTO compositetest(status,ctime,key,nil) VALUES ('C',12345678,'key1','')")
@@ -1904,12 +1899,11 @@ class TestCQL(UpgradeTester):
             assert_invalid(cursor, "SELECT * FROM compositetest WHERE ctime>=12345679 AND key='key3' AND ctime<=12345680 LIMIT 3;")
             assert_invalid(cursor, "SELECT * FROM compositetest WHERE ctime=12345679  AND key='key3' AND ctime<=12345680 LIMIT 3;")
 
-    def order_by_multikey_test(self):
+    def test_order_by_multikey(self):
         """
         Test for #4612 bug and more generally order by when multiple C* rows are queried
         @jira_ticket CASSANDRA-4612
         """
-
         cursor = self.prepare(ordered=True)
         cursor.execute("""
             CREATE TABLE test(
@@ -1922,7 +1916,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
             cursor.default_fetch_size = None
 
@@ -1940,7 +1934,7 @@ class TestCQL(UpgradeTester):
             assert_invalid(cursor, "SELECT col1 FROM test ORDER BY col1;")
             assert_invalid(cursor, "SELECT col1 FROM test WHERE my_id > 'key1' ORDER BY col1;")
 
-    def remove_range_slice_test(self):
+    def test_remove_range_slice(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -1951,7 +1945,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             for i in range(0, 3):
@@ -1960,7 +1954,7 @@ class TestCQL(UpgradeTester):
             cursor.execute("DELETE FROM test WHERE k = 1")
             assert_all(cursor, "SELECT * FROM test", [[0, 0], [2, 2]])
 
-    def indexes_composite_test(self):
+    def test_indexes_composite(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -1977,7 +1971,7 @@ class TestCQL(UpgradeTester):
         time.sleep(1)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             req = "INSERT INTO test (blog_id, timestamp, author, content) VALUES (%d, %d, '%s', '%s')"
@@ -2002,19 +1996,18 @@ class TestCQL(UpgradeTester):
             query = "SELECT blog_id, timestamp FROM test WHERE author = 'bob'"
             assert_all(cursor, query, [[1, 0], [1, 3], [0, 0]])
 
-    def refuse_in_with_indexes_test(self):
+    def test_refuse_in_with_indexes(self):
         """
         Test for the validation bug of #4709
         @jira_ticket CASSANDRA-4709
         """
-
         cursor = self.prepare()
         cursor.execute("create table t1 (pk varchar primary key, col1 varchar, col2 varchar);")
         cursor.execute("create index t1_c1 on t1(col1);")
         cursor.execute("create index t1_c2 on t1(col2);")
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE t1")
 
             cursor.execute("insert into t1  (pk, col1, col2) values ('pk1','foo1','bar1');")
@@ -2025,12 +2018,11 @@ class TestCQL(UpgradeTester):
             cursor.execute("insert into t1  (pk, col1, col2) values ('pk3','foo3','bar3');")
             assert_invalid(cursor, "select * from t1 where col2 in ('bar1', 'bar2');")
 
-    def reversed_compact_test(self):
+    def test_reversed_compact(self):
         """
         Test for #4716 bug and more generally for good behavior of ordering
         @jira_ticket CASSANDRA-4716
         """
-
         cursor = self.prepare()
         cursor.execute("""
             CREATE TABLE test1 (
@@ -2052,7 +2044,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test1")
             cursor.execute("TRUNCATE test2")
 
@@ -2098,13 +2090,12 @@ class TestCQL(UpgradeTester):
             query = "SELECT c FROM test2 WHERE c >= 2 AND c <= 6 AND k = 'foo' ORDER BY c DESC"
             assert_all(cursor, query, [[6], [5], [4], [3], [2]])
 
-    def reversed_compact_multikey_test(self):
+    def test_reversed_compact_multikey(self):
         """
         Test for the bug from #4760 and #4759
         @jira_ticket CASSANDRA-4760
         @jira_ticket CASSANDRA-4759
         """
-
         cursor = self.prepare()
         cursor.execute("""
             CREATE TABLE test (
@@ -2118,7 +2109,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             for i in range(0, 3):
@@ -2182,7 +2173,7 @@ class TestCQL(UpgradeTester):
             query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 <= 1 ORDER BY c1 DESC, c2 DESC"
             assert_all(cursor, query, [[1, 2], [1, 1], [1, 0], [0, 2], [0, 1], [0, 0]])
 
-    def collection_and_regular_test(self):
+    def test_collection_and_regular(self):
 
         cursor = self.prepare()
 
@@ -2195,14 +2186,14 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             cursor.execute("INSERT INTO test(k, l, c) VALUES(3, [0, 1, 2], 4)")
             cursor.execute("UPDATE test SET l[0] = 1, c = 42 WHERE k = 3")
             assert_one(cursor, "SELECT l, c FROM test WHERE k = 3", [[1, 1, 2], 42])
 
-    def batch_and_list_test(self):
+    def test_batch_and_list(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -2213,7 +2204,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             cursor.execute("""
@@ -2236,7 +2227,7 @@ class TestCQL(UpgradeTester):
 
             assert_one(cursor, "SELECT l FROM test WHERE k = 1", [[3, 2, 1]])
 
-    def boolean_test(self):
+    def test_boolean(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -2247,13 +2238,13 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             cursor.execute("INSERT INTO test (k, b) VALUES (true, false)")
             assert_one(cursor, "SELECT * FROM test WHERE k = true", [True, False])
 
-    def multiordering_test(self):
+    def test_multiordering(self):
         cursor = self.prepare()
         cursor.execute("""
             CREATE TABLE test (
@@ -2265,7 +2256,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             for i in range(0, 2):
@@ -2285,7 +2276,7 @@ class TestCQL(UpgradeTester):
             assert_invalid(cursor, "SELECT c1, c2 FROM test WHERE k = 'foo' ORDER BY c2 ASC")
             assert_invalid(cursor, "SELECT c1, c2 FROM test WHERE k = 'foo' ORDER BY c1 ASC, c2 ASC")
 
-    def returned_null_test(self):
+    def test_returned_null(self):
         """
         Test for returned null.
         StorageProxy short read protection hadn't been updated after the changes made by CASSANDRA-3647,
@@ -2305,7 +2296,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             cursor.execute("INSERT INTO test (k, c1, c2, v) VALUES (0, 0, 0, 0);")
@@ -2316,7 +2307,7 @@ class TestCQL(UpgradeTester):
             query = "SELECT * FROM test WHERE k = 0 LIMIT 1;"
             assert_one(cursor, query, [0, 0, 2, 2])
 
-    def multi_list_set_test(self):
+    def test_multi_list_set(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -2335,7 +2326,7 @@ class TestCQL(UpgradeTester):
 
             assert_one(cursor, "SELECT l1, l2 FROM test WHERE k = 0", [[1, 24, 3], [4, 42, 6]])
 
-    def composite_index_collections_test(self):
+    def test_composite_index_collections(self):
         cursor = self.prepare(ordered=True)
         cursor.execute("""
             CREATE TABLE blogs (
@@ -2351,7 +2342,7 @@ class TestCQL(UpgradeTester):
         cursor.execute("CREATE INDEX ON blogs(author)")
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE blogs")
 
             req = "INSERT INTO blogs (blog_id, time1, time2, author, content) VALUES (%d, %d, %d, '%s', %s)"
@@ -2363,7 +2354,7 @@ class TestCQL(UpgradeTester):
             query = "SELECT blog_id, content FROM blogs WHERE author='foo'"
             assert_all(cursor, query, [[1, set(['bar1', 'bar2'])], [1, set(['bar2', 'bar3'])], [2, set(['baz'])]])
 
-    def truncate_clean_cache_test(self):
+    def test_truncate_clean_cache(self):
         cursor = self.prepare(ordered=True, use_cache=True)
 
         if self.node_version_above('2.1'):
@@ -2384,7 +2375,7 @@ class TestCQL(UpgradeTester):
             """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             for i in range(0, 3):
@@ -2398,7 +2389,7 @@ class TestCQL(UpgradeTester):
             query = "SELECT v1, v2 FROM test WHERE k IN (0, 1, 2)"
             assert_none(cursor, query)
 
-    def range_with_deletes_test(self):
+    def test_range_with_deletes(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -2409,7 +2400,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             nb_keys = 30
@@ -2418,13 +2409,13 @@ class TestCQL(UpgradeTester):
             for i in range(0, nb_keys):
                 cursor.execute("INSERT INTO test(k, v) VALUES ({}, {})".format(i, i))
 
-            for i in random.sample(xrange(nb_keys), nb_deletes):
+            for i in random.sample(range(nb_keys), nb_deletes):
                 cursor.execute("DELETE FROM test WHERE k = {}".format(i))
 
             res = list(cursor.execute("SELECT * FROM test LIMIT {}".format(nb_keys // 2)))
             assert_length_equal(res, nb_keys // 2)
 
-    def collection_function_test(self):
+    def test_collection_function(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -2435,11 +2426,11 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             assert_invalid(cursor, "SELECT ttl(l) FROM test WHERE k = 0")
             assert_invalid(cursor, "SELECT writetime(l) FROM test WHERE k = 0")
 
-    def composite_partition_key_validation_test(self):
+    def test_composite_partition_key_validation(self):
         """
         Test for bug from #5122
         @jira_ticket CASSANDRA-5122
@@ -2449,7 +2440,7 @@ class TestCQL(UpgradeTester):
         cursor.execute("CREATE TABLE foo (a int, b text, c uuid, PRIMARY KEY ((a, b)));")
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE foo")
 
             cursor.execute("INSERT INTO foo (a, b , c ) VALUES (  1 , 'aze', 4d481800-4c5f-11e1-82e0-3f484de45426)")
@@ -2461,11 +2452,11 @@ class TestCQL(UpgradeTester):
             assert_invalid(cursor, "SELECT * FROM foo WHERE a=1")
 
     @since('2.2')
-    def multi_in_test(self):
+    def test_multi_in(self):
         self.__multi_in(False)
 
     @since('2.2')
-    def multi_in_compact_test(self):
+    def test_multi_in_compact(self):
         self.__multi_in(True)
 
     def __multi_in(self, compact):
@@ -2507,7 +2498,7 @@ class TestCQL(UpgradeTester):
         cursor.execute(create)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE zipcodes")
 
             for d in data:
@@ -2548,7 +2539,7 @@ class TestCQL(UpgradeTester):
                 assert_none(cursor, "select zipcode from zipcodes where group='test' AND zipcode IN ('06902','73301','94102') and state IN ('CT','CA') and fips_regions < 0")
 
     @since('2.2')
-    def multi_in_compact_non_composite_test(self):
+    def test_multi_in_compact_non_composite(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -2561,7 +2552,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             cursor.execute("INSERT INTO test (key, c, v) VALUES (0, 0, 0)")
@@ -2571,7 +2562,7 @@ class TestCQL(UpgradeTester):
             query = "SELECT * FROM test WHERE key=0 AND c IN (0, 2)"
             assert_all(cursor, query, [[0, 0, 0], [0, 2, 2]])
 
-    def large_clustering_in_test(self):
+    def test_large_clustering_in(self):
         """
         @jira_ticket CASSANDRA-8410
         """
@@ -2587,7 +2578,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             insert_statement = cursor.prepare("INSERT INTO test (k, c, v) VALUES (?, ?, ?)")
@@ -2600,7 +2591,7 @@ class TestCQL(UpgradeTester):
             rows = list(cursor.execute(select_statement, [0, in_values]))
 
             assert_length_equal(rows, 1)
-            self.assertEqual((0, 0, 0), rows[0])
+            assert (0, 0, 0) == rows[0]
 
             # insert approximately 1000 random rows between 0 and 10k
             clustering_values = set([random.randint(0, 9999) for _ in range(1000)])
@@ -2611,7 +2602,7 @@ class TestCQL(UpgradeTester):
             rows = list(cursor.execute(select_statement, [0, in_values]))
             assert_length_equal(rows, len(clustering_values))
 
-    def timeuuid_test(self):
+    def test_timeuuid(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -2623,7 +2614,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             assert_invalid(cursor, "INSERT INTO test (k, t) VALUES (0, 2012-11-07 18:18:22-0800)", expected=SyntaxException)
@@ -2651,7 +2642,7 @@ class TestCQL(UpgradeTester):
             cursor.execute("SELECT t FROM test WHERE k = 0 AND t > maxTimeuuid(1234567) AND t < minTimeuuid('2012-11-07 18:18:22-0800')")
             # not sure what to check exactly so just checking the query returns
 
-    def float_with_exponent_test(self):
+    def test_float_with_exponent(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -2663,14 +2654,14 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             cursor.execute("INSERT INTO test(k, d, f) VALUES (0, 3E+10, 3.4E3)")
             cursor.execute("INSERT INTO test(k, d, f) VALUES (1, 3.E10, -23.44E-3)")
             cursor.execute("INSERT INTO test(k, d, f) VALUES (2, 3, -2)")
 
-    def compact_metadata_test(self):
+    def test_compact_metadata(self):
         """
         Test regression from #5189
         @jira_ticket CASSANDRA-5189
@@ -2685,13 +2676,13 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE bar")
 
             cursor.execute("INSERT INTO bar (id, i) VALUES (1, 2);")
             assert_one(cursor, "SELECT * FROM bar", [1, 2])
 
-    def query_compact_tables_during_upgrade_test(self):
+    def test_query_compact_tables_during_upgrade(self):
         """
         Check that un-upgraded sstables for compact storage tables
         can be read after an upgrade. Checks for a regression where
@@ -2714,33 +2705,33 @@ class TestCQL(UpgradeTester):
 
         execute_concurrent_with_args(cursor,
                                      cursor.prepare("INSERT INTO t1 (a, b) VALUES (?, ?)"),
-                                     [(i, i) for i in xrange(100)])
+                                     [(i, i) for i in range(100)])
         self.cluster.flush()
 
         def check_read_all(cursor):
             read_count = 0
             # first read each row separately - obviously, we should be able to retrieve all 100
-            for i in xrange(100):
+            for i in range(100):
                 res = cursor.execute("SELECT * FROM t1 WHERE a = {a}".format(a=i))
                 read_count += len(rows_to_list(res))
-            debug("Querying for individual keys retrieved {c} results".format(c=read_count))
-            self.assertEqual(read_count, 100)
+            logger.debug("Querying for individual keys retrieved {c} results".format(c=read_count))
+            assert read_count == 100
             # now a range slice, again all 100 rows should be retrievable
             res = rows_to_list(cursor.execute("SELECT * FROM t1"))
             read_count = len(res)
-            debug("Range request retrieved {c} rows".format(c=read_count))
+            logger.debug("Range request retrieved {c} rows".format(c=read_count))
             assert_length_equal(res, 100)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {state} node".format(state="upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {state} node".format(state="upgraded" if is_upgraded else "old"))
             check_read_all(cursor)
 
-        debug("Querying upgraded node after running upgradesstables")
+        logger.debug("Querying upgraded node after running upgradesstables")
         node1 = self.cluster.nodelist()[0]
         node1.nodetool("upgradesstables -a")
         check_read_all(self.patient_exclusive_cql_connection(node1, keyspace="ks"))
 
-    def clustering_indexing_test(self):
+    def test_clustering_indexing(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -2759,7 +2750,7 @@ class TestCQL(UpgradeTester):
         cursor.execute("CREATE INDEX ON posts(id2)")
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE posts")
 
             cursor.execute("INSERT INTO posts(id1, id2, author, time, v1, v2) VALUES(0, 0, 'bob', 0, 'A', 'A')")
@@ -2786,7 +2777,7 @@ class TestCQL(UpgradeTester):
             query = "SELECT v1 FROM posts WHERE time = 1"
             assert_all(cursor, query, [['B'], ['E']])
 
-    def edge_2i_on_complex_pk_test(self):
+    def test_edge_2i_on_complex_pk(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -2807,7 +2798,7 @@ class TestCQL(UpgradeTester):
         cursor.execute("CREATE INDEX ON indexed(ck2)")
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE indexed")
 
             cursor.execute("INSERT INTO indexed (pk0, pk1, ck0, ck1, ck2, value) VALUES (0, 1, 2, 3, 4, 5)")
@@ -2825,13 +2816,12 @@ class TestCQL(UpgradeTester):
 
             assert_all(cursor, "SELECT value FROM indexed WHERE pk0 = 5 AND pk1 = 0 AND ck0 = 1 AND ck2 = 3 ALLOW FILTERING", [[4]])
 
-    def end_of_component_as_end_key_test(self):
+    def test_end_of_component_as_end_key(self):
         """
         Test to make sure that an end-of-component is no longer being used as the end key of the range when
         a secondary index is involved.
         @jira_ticket CASSANDRA-5240
         """
-
         cursor = self.prepare()
 
         cursor.execute("""
@@ -2847,7 +2837,7 @@ class TestCQL(UpgradeTester):
         cursor.execute("CREATE INDEX ON test(severity);")
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             cursor.execute("insert into test(interval, seq, id , severity) values('t',1, 1, 1);")
@@ -2862,7 +2852,7 @@ class TestCQL(UpgradeTester):
             query = "select * from test where severity = 3 and interval = 't' and seq =1;"
             assert_one(cursor, query, ['t', 1, 4, 3])
 
-    def ticket_5230_test(self):
+    def test_ticket_5230(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -2875,7 +2865,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE foo")
 
             cursor.execute("INSERT INTO foo(key, c, v) VALUES ('foo', '1', '1')")
@@ -2885,7 +2875,7 @@ class TestCQL(UpgradeTester):
             query = "SELECT c FROM foo WHERE key = 'foo' AND c IN ('1', '2');"
             assert_all(cursor, query, [['1'], ['2']])
 
-    def conversion_functions_test(self):
+    def test_conversion_functions(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -2897,7 +2887,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             cursor.execute("INSERT INTO test(k, i, b) VALUES (0, blobAsVarint(bigintAsBlob(3)), textAsBlob('foobar'))")
@@ -2906,7 +2896,7 @@ class TestCQL(UpgradeTester):
 
     # Fixed by CASSANDRA-12654 in 3.12
     @since('2.0', max_version='3.12')
-    def IN_clause_on_last_key_test(self):
+    def test_IN_clause_on_last_key(self):
         """
         Tests patch to improve validation by not throwing an assertion when using map, list, or set
         with IN clauses on the last key.
@@ -2925,10 +2915,10 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             assert_invalid(cursor, "select * from test where key = 'foo' and c in (1,3,4);")
 
-    def function_and_reverse_type_test(self):
+    def test_function_and_reverse_type(self):
         """
         @jira_ticket CASSANDRA-5386
         """
@@ -2943,10 +2933,10 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("INSERT INTO test (k, c, v) VALUES (0, now(), 0);")
 
-    def NPE_during_select_with_token_test(self):
+    def test_NPE_during_select_with_token(self):
         """
         Test for NPE during CQL3 select with token()
         @jira_ticket CASSANDRA-5404
@@ -2956,24 +2946,24 @@ class TestCQL(UpgradeTester):
         cursor.execute("CREATE TABLE test (key text PRIMARY KEY)")
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             # We just want to make sure this doesn't NPE server side
             assert_invalid(cursor, "select * from test where token(key) > token(int(3030343330393233)) limit 1;")
 
-    def empty_blob_test(self):
+    def test_empty_blob(self):
         cursor = self.prepare()
 
         cursor.execute("CREATE TABLE test (k int PRIMARY KEY, b blob)")
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             cursor.execute("INSERT INTO test (k, b) VALUES (0, 0x)")
             assert_one(cursor, "SELECT * FROM test", [0, ''])
 
     @since('2', max_version='4')
-    def rename_test(self):
+    def test_rename(self):
         cursor = self.prepare(start_rpc=True)
 
         node = self.cluster.nodelist()[0]
@@ -3001,10 +2991,10 @@ class TestCQL(UpgradeTester):
         cursor.execute("ALTER TABLE test RENAME column1 TO foo1 AND column2 TO foo2 AND column3 TO foo3")
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             assert_one(cursor, "SELECT foo1, foo2, foo3 FROM test", [4, 3, 2])
 
-    def clustering_order_and_functions_test(self):
+    def test_clustering_order_and_functions(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -3016,7 +3006,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             for i in range(0, 5):
@@ -3024,7 +3014,7 @@ class TestCQL(UpgradeTester):
 
             cursor.execute("SELECT dateOf(t) FROM test")
 
-    def conditional_update_test(self):
+    def test_conditional_update(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -3037,7 +3027,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             # Shouldn't apply
@@ -3099,7 +3089,7 @@ class TestCQL(UpgradeTester):
                 assert_one(cursor, "DELETE FROM test WHERE k = 0 IF v1 IN (null)", [True])
 
     @since('2.1.1')
-    def non_eq_conditional_update_test(self):
+    def test_non_eq_conditional_update(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -3112,7 +3102,7 @@ class TestCQL(UpgradeTester):
         """)
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             # non-EQ conditions
@@ -3127,7 +3117,7 @@ class TestCQL(UpgradeTester):
             assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 IN (142, 276)", [False, 2])
             assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 IN ()", [False, 2])
 
-    def conditional_delete_test(self):
+    def test_conditional_delete(self):
         cursor = self.prepare()
 
         cursor.execute("""
@@ -3148,7 +3138,7 @@ class TestCQL(UpgradeTester):
             )""")
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
             cursor.execute("TRUNCATE test2")
 
@@ -3185,13 +3175,13 @@ class TestCQL(UpgradeTester):
                 assert_invalid(cursor, "DELETE FROM test2 WHERE k = 0 AND i > 0 IF EXISTS")
                 assert_invalid(cursor, "DELETE FROM test2 WHERE k = 0 AND i > 0 IF v = 'foo'")
 
-    def range_key_ordered_test(self):
+    def test_range_key_ordered(self):
         cursor = self.prepare(ordered=True)
 
         cursor.execute("CREATE TABLE test ( k int PRIMARY KEY)")
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             cursor.execute("INSERT INTO test(k) VALUES (-1)")
@@ -3201,12 +3191,12 @@ class TestCQL(UpgradeTester):
             assert_all(cursor, "SELECT * FROM test", [[0], [1], [-1]])
             assert_invalid(cursor, "SELECT * FROM test WHERE k >= -1 AND k < 1;")
 
-    def select_with_alias_test(self):
+    def test_select_with_alias(self):
         cursor = self.prepare()
         cursor.execute('CREATE TABLE users (id int PRIMARY KEY, name text)')
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE users")
 
             for id in range(0, 5):
@@ -3214,30 +3204,30 @@ class TestCQL(UpgradeTester):
 
             # test aliasing count(*)
             res = cursor.execute('SELECT count(*) AS user_count FROM users')
-            self.assertEqual('user_count', res[0]._fields[0])
-            self.assertEqual(5, res[0].user_count)
+            assert 'user_count' == res[0]._fields[0]
+            assert 5 == res[0].user_count
 
             # test aliasing regular value
             res = cursor.execute('SELECT name AS user_name FROM users WHERE id = 0')
-            self.assertEqual('user_name', res[0]._fields[0])
-            self.assertEqual('name0', res[0].user_name)
+            assert 'user_name' == res[0]._fields[0]
+            assert 'name0' == res[0].user_name
 
             # test aliasing writetime
             res = cursor.execute('SELECT writeTime(name) AS name_writetime FROM users WHERE id = 0')
-            self.assertEqual('name_writetime', res[0]._fields[0])
-            self.assertEqual(0, res[0].name_writetime)
+            assert 'name_writetime' == res[0]._fields[0]
+            assert 0 == res[0].name_writetime
 
             # test aliasing ttl
             res = cursor.execute('SELECT ttl(name) AS name_ttl FROM users WHERE id = 0')
-            self.assertEqual('name_ttl', res[0]._fields[0])
-            self.assertIn(res[0].name_ttl, (9, 10))
+            assert 'name_ttl' == res[0]._fields[0]
+            assert res[0].name_ttl in (9, 10)
 
             # test aliasing a regular function
             res = cursor.execute('SELECT intAsBlob(id) AS id_blob FROM users WHERE id = 0')
-            self.assertEqual('id_blob', res[0]._fields[0])
-            self.assertEqual('\x00\x00\x00\x00', res[0].id_blob)
+            assert 'id_blob' == res[0]._fields[0]
+            assert b'\x00\x00\x00\x00' == res[0].id_blob
 
-            debug("Current node version is {}".format(self.get_node_version(is_upgraded)))
+            logger.debug("Current node version is {}".format(self.get_node_version(is_upgraded)))
 
             if self.get_node_version(is_upgraded) < LooseVersion('3.8'):
                 error_msg = "Aliases aren't allowed in the where clause"
@@ -3253,7 +3243,7 @@ class TestCQL(UpgradeTester):
             # test that select throws a meaningful exception for aliases in order by clause
             assert_invalid(cursor, 'SELECT id AS user_id, name AS user_name FROM users WHERE id IN (0) ORDER BY user_name', matching=error_msg)
 
-    def nonpure_function_collection_test(self):
+    def test_nonpure_function_collection(self):
         """
         @jira_ticket CASSANDRA-5795
         """
@@ -3261,18 +3251,18 @@ class TestCQL(UpgradeTester):
         cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v list<timeuuid>)")
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             # we just want to make sure this doesn't throw
             cursor.execute("INSERT INTO test(k, v) VALUES (0, [now()])")
 
-    def empty_in_test(self):
+    def test_empty_in(self):
         cursor = self.prepare()
         cursor.execute("CREATE TABLE test (k1 int, k2 int, v int, PRIMARY KEY (k1, k2))")
         # Same test, but for compact
         cursor.execute("CREATE TABLE test_compact (k1 int, k2 int, v int, PRIMARY KEY (k1, k2)) WITH COMPACT STORAGE")
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
             cursor.execute("TRUNCATE test_compact")
 
@@ -3312,7 +3302,7 @@ class TestCQL(UpgradeTester):
             cursor.execute("UPDATE test_compact SET v = 3 WHERE k1 IN () AND k2 = 2")
             assert_nothing_changed("test_compact")
 
-    def collection_flush_test(self):
+    def test_collection_flush(self):
         """
         @jira_ticket CASSANDRA-5805
         """
@@ -3321,7 +3311,7 @@ class TestCQL(UpgradeTester):
         cursor.execute("CREATE TABLE test (k int PRIMARY KEY, s set<int>)")
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE test")
 
             cursor.execute("INSERT INTO test(k, s) VALUES (1, {1})")
@@ -3331,7 +3321,7 @@ class TestCQL(UpgradeTester):
 
             assert_one(cursor, "SELECT * FROM test", [1, set([2])])
 
-    def select_distinct_test(self):
+    def test_select_distinct(self):
         cursor = self.prepare(ordered=True)
 
         # Test a regular (CQL3) table.
@@ -3342,12 +3332,12 @@ class TestCQL(UpgradeTester):
         cursor.execute('CREATE TABLE wide (pk int, name text, val int, PRIMARY KEY(pk, name)) WITH COMPACT STORAGE')
 
         for is_upgraded, cursor in self.do_upgrade(cursor):
-            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
+            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
             cursor.execute("TRUNCATE regular")
             cursor.execute("TRUNCATE compact")
             cursor.execute("TRUNCATE wide")
 
-            for i in xrange(0, 3):
+            for i in range(0, 3):
                 cursor.execute('INSERT INTO regular (pk0, pk1, ck0, val) VALUES (%d, %d, 0, 0)' % (i, i))
                 cursor.execute('INSERT INTO regular (pk0, pk1, ck0, val) VALUES (%d, %d, 1, 1)' % (i, i))
 
@@ -3355,14 +3345,14 @@ class TestCQL(UpgradeTester):
 
             assert_all(cursor, 'SELECT DISTINCT pk0, pk1 FROM regular LIMIT 3', [[0, 0], [1, 1], [2, 2]])
 
-            for i in xrange(0, 3):
+            for i in range(0, 3):
                 cursor.execute('INSERT INTO compact (pk0, pk1, val) VALUES (%d, %d, %d)' % (i, i, i))
 
             assert_all(cursor, 'SELECT DISTINCT pk0, pk1 FROM compact LIMIT 1', [[0, 0]])
 
             assert_all(cursor, 'SELECT DISTINCT pk0, pk1 FROM compact LIMIT 3', [[0, 0], [1, 1], [2, 2]])
 
-            for i in xrange(0, 3):
+            for i in range(0, 3):
                 cursor.execute("INSERT INTO wide (pk, name, val) VALUES (%d, 'name0', 0)" % i)
                 cursor.execute("INSERT INTO wide (pk, name, val) VALUES (%d, 'name1', 1)" % i)
 
@@ -3374,12 +3364,12 @@ class TestCQL(UpgradeTester):
             assert_invalid(cursor, 'SELECT DISTINCT pk0 FROM regular', matching="queries must request all the partition key columns")
             assert_invalid(cursor, 'SELECT DISTINCT pk0, pk1, ck0 FROM regular', matching="queries

<TRUNCATED>
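
For anyone skimming the truncated remainder of this hunk: every change in this
file is the same mechanical translation. A distilled before/after sketch
(names here are generic placeholders, not lines taken from the diff; the
"before" half is python2/nose and will not run under python3):

    # before: nose / python2
    def feature_test(self):
        debug("Querying upgraded node")          # dtest's module-level debug()
        self.assertEqual(expected, actual)       # unittest-style assertion
        for i in xrange(10):                     # python2-only builtin
            ...

    # after: pytest / python3
    def test_feature(self):
        logger.debug("Querying upgraded node")   # stdlib logging
        assert expected == actual                # bare assert, introspected by pytest
        for i in range(10):
            ...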



[32/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/conftest.py
----------------------------------------------------------------------
diff --git a/conftest.py b/conftest.py
new file mode 100644
index 0000000..650e9c8
--- /dev/null
+++ b/conftest.py
@@ -0,0 +1,489 @@
+import pytest
+import logging
+import os
+import shutil
+import time
+import re
+import platform
+import copy
+import inspect
+import subprocess
+
+from dtest import running_in_docker, cleanup_docker_environment_before_test_execution
+
+from datetime import datetime
+from distutils.version import LooseVersion
+from netifaces import AF_INET
+from psutil import virtual_memory
+
+import netifaces as ni
+
+from ccmlib.common import validate_install_dir, get_version_from_build, is_win
+
+from dtest_setup import DTestSetup
+from dtest_setup_overrides import DTestSetupOverrides
+
+logger = logging.getLogger(__name__)
+
+
+class DTestConfig:
+    def __init__(self):
+        self.use_vnodes = True
+        self.use_off_heap_memtables = False
+        self.num_tokens = -1
+        self.data_dir_count = -1
+        self.force_execution_of_resource_intensive_tests = False
+        self.skip_resource_intensive_tests = False
+        self.cassandra_dir = None
+        self.cassandra_version = None
+        self.delete_logs = False
+        self.execute_upgrade_tests = False
+        self.disable_active_log_watching = False
+        self.keep_test_dir = False
+        self.enable_jacoco_code_coverage = False
+        self.jemalloc_path = find_libjemalloc()
+
+    def setup(self, request):
+        self.use_vnodes = request.config.getoption("--use-vnodes")
+        self.use_off_heap_memtables = request.config.getoption("--use-off-heap-memtables")
+        self.num_tokens = request.config.getoption("--num-tokens")
+        self.data_dir_count = request.config.getoption("--data-dir-count-per-instance")
+        self.force_execution_of_resource_intensive_tests = request.config.getoption("--force-resource-intensive-tests")
+        self.skip_resource_intensive_tests = request.config.getoption("--skip-resource-intensive-tests")
+        if request.config.getoption("--cassandra-dir") is not None:
+            self.cassandra_dir = os.path.expanduser(request.config.getoption("--cassandra-dir"))
+        self.cassandra_version = request.config.getoption("--cassandra-version")
+        self.delete_logs = request.config.getoption("--delete-logs")
+        self.execute_upgrade_tests = request.config.getoption("--execute-upgrade-tests")
+        self.disable_active_log_watching = request.config.getoption("--disable-active-log-watching")
+        self.keep_test_dir = request.config.getoption("--keep-test-dir")
+        self.enable_jacoco_code_coverage = request.config.getoption("--enable-jacoco-code-coverage")
+
+
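+# Usage sketch for the DTestConfig class above (hypothetical wiring, not part
+# of this commit): a fixture would typically build one instance and populate
+# it from pytest's built-in 'request' fixture.
+#
+#     dtest_config = DTestConfig()
+#     dtest_config.setup(request)
+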
+def check_required_loopback_interfaces_available():
+    """
+    We need at least 9 loopback interfaces configured to run almost all dtests. On Linux, loopback
+    interfaces are automatically created as they are used, but on Mac they need to be explicitly
+    created. Check if we're running on Mac (Darwin), and if so check we have at least 9 loopback
+    interfaces available; otherwise bail out so we don't run the tests in a known bad config, and
+    give the user some helpful advice on how to get their machine into a good known config.
+    """
+    if platform.system() == "Darwin":
+        if len(ni.ifaddresses('lo0')[AF_INET]) < 9:
+            pytest.exit("At least 9 loopback interfaces are required to run dtests. "
+                            "On Mac you can create the required loopback interfaces by running "
+                            "'for i in {1..9}; do sudo ifconfig lo0 alias 127.0.0.$i up; done;'")
+
+
+def pytest_addoption(parser):
+    parser.addoption("--use-vnodes", action="store_true", default=False,
+                     help="Determines wither or not to setup clusters using vnodes for tests")
+    parser.addoption("--use-off-heap-memtables", action="store_true", default=False,
+                     help="Enable Off Heap Memtables when creating test clusters for tests")
+    parser.addoption("--num-tokens", action="store", default=256,
+                     help="Number of tokens to set num_tokens yaml setting to when creating instances "
+                          "with vnodes enabled")
+    parser.addoption("--data-dir-count-per-instance", action="store", default=3,
+                     help="Control the number of data directories to create per instance")
+    parser.addoption("--force-resource-intensive-tests", action="store_true", default=False,
+                     help="Forces the execution of tests marked as resource_intensive")
+    parser.addoption("--skip-resource-intensive-tests", action="store_true", default=False,
+                     help="Skip all tests marked as resource_intensive")
+    parser.addoption("--cassandra-dir", action="store", default=None,
+                     help="The directory containing the built C* artifacts to run the tests against. "
+                          "(e.g. the path to the root of a cloned C* git directory. Before executing dtests using "
+                          "this directory you must build C* with 'ant clean jar'). If you're doing C* development and "
+                          "want to run the tests this is almost always going to be the correct option.")
+    parser.addoption("--cassandra-version", action="store", default=None,
+                     help="A specific C* version to run the dtests against. The dtest framework will "
+                          "pull the required artifacts for this version.")
+    parser.addoption("--delete-logs", action="store_true", default=False,
+                     help="Delete all generated logs created by a test after the completion of a test.")
+    parser.addoption("--execute-upgrade-tests", action="store_true", default=False,
+                     help="Execute Cassandra Upgrade Tests (e.g. tests annotated with the upgrade_test mark)")
+    parser.addoption("--disable-active-log-watching", action="store_true", default=False,
+                     help="Disable ccm active log watching, which will cause dtests to check for errors in the "
+                          "logs in a single operation instead of semi-realtime processing by consuming "
+                          "ccm _log_error_handler callbacks")
+    parser.addoption("--keep-test-dir", action="store_true", default=False,
+                     help="Do not remove/cleanup the test ccm cluster directory and its artifacts "
+                          "after the test completes")
+    parser.addoption("--enable-jacoco-code-coverage", action="store_true", default=False,
+                     help="Enable JaCoCo Code Coverage Support")
+
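+# For reference, a typical invocation consuming the options registered above
+# might look like the following (paths are hypothetical):
+#
+#     pytest --cassandra-dir=~/src/cassandra --use-vnodes --num-tokens=256 \
+#         consistency_test.py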
+
+def sufficient_system_resources_for_resource_intensive_tests():
+    mem = virtual_memory()
+    total_mem_gb = mem.total/1024/1024/1024
+    logger.info("total available system memory is %dGB" % total_mem_gb)
+    # TODO (kjkj): do not hard-code this bound; for now require enough memory for 9 instances at 3 GB apiece
+    return total_mem_gb >= 9*3
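+# Worked example: a 32 GB machine reports mem.total ~= 34359738368 bytes, so
+# total_mem_gb ~= 32, which clears the 9 * 3 = 27 GB bound above.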
+
+
+@pytest.fixture(scope='function', autouse=True)
+def fixture_dtest_setup_overrides():
+    """
+    no-op default implementation of fixture_dtest_setup_overrides.
+    we run this when a test class hasn't implemented its own
+    fixture_dtest_setup_overrides
+    """
+    return DTestSetupOverrides()
+
+
+"""
+Not exactly sure why :\ but, this fixture needs to be scoped to function level and not
+session or class. If you invoke pytest with tests across multiple test classes, when scopped
+at session, the root logger appears to get reset between each test class invocation.
+this means that the first test to run not from the first test class (and all subsequent 
+tests), will have the root logger reset and see a level of NOTSET. Scoping it at the
+class level seems to work, and I guess it's not that much extra overhead to setup the
+logger once per test class vs. once per session in the grand scheme of things.
+"""
+@pytest.fixture(scope="function", autouse=True)
+def fixture_logging_setup(request):
+    # set the root logger level to whatever the user asked for
+    # all new loggers created will use the root logger as a template
+    # essentially making this the "default" active log level
+    log_level = logging.INFO
+    try:
+        # first see if logging level overridden by user as command line argument
+        log_level_from_option = pytest.config.getoption("--log-level")
+        if log_level_from_option is not None:
+            log_level = logging.getLevelName(log_level_from_option)
+        else:
+            raise ValueError
+    except ValueError:
+        # the user didn't specify it as a command line argument to pytest; check if
+        # we have a default in the loaded pytest.ini. Note: variable names in .ini
+        # land are separated with "_" while the command line arguments use "-"
+        if pytest.config.inicfg.get("log_level") is not None:
+            log_level = logging.getLevelName(pytest.config.inicfg.get("log_level"))
+
+    logging.root.setLevel(log_level)
+
+    logging_format = None
+    try:
+        # first see if logging level overridden by user as command line argument
+        log_format_from_option = pytest.config.getoption("--log-format")
+        if log_format_from_option is not None:
+            logging_format = log_format_from_option
+        else:
+            raise ValueError
+    except ValueError:
+        if pytest.config.inicfg.get("log_format") is not None:
+            logging_format = pytest.config.inicfg.get("log_format")
+
+    logging.basicConfig(level=log_level,
+                        format=logging_format)
+
+    # next, regardless of the level we set above (and requested by the user),
+    # reconfigure the "cassandra" logger to minimum INFO level to override the
+    # logging level that the "cassandra.*" imports should use; DEBUG is just
+    # insanely noisy and verbose, with the extra logging of very limited help
+    # in the context of dtest execution
+    if log_level == logging.DEBUG:
+        cassandra_module_log_level = logging.INFO
+    else:
+        cassandra_module_log_level = log_level
+    logging.getLogger("cassandra").setLevel(cassandra_module_log_level)
+
+
+@pytest.fixture(scope="session")
+def log_global_env_facts(fixture_dtest_config):
+    if pytest.config.pluginmanager.hasplugin('junitxml'):
+        my_junit = getattr(pytest.config, '_xml', None)
+        if my_junit is not None:
+            my_junit.add_global_property('USE_VNODES', fixture_dtest_config.use_vnodes)
+
+
+@pytest.fixture
+def fixture_dtest_config(request, fixture_logging_setup):
+    # although we don't use fixture_logging_setup here, we do want to
+    # have that fixture run as a prerequisite to this one, and right now
+    # this is the only way that can be done with pytest
+    dtest_config = DTestConfig()
+    dtest_config.setup(request)
+    return dtest_config
+
+
+@pytest.fixture(scope='function', autouse=True)
+def fixture_maybe_skip_tests_requiring_novnodes(request):
+    """
+    Fixture run before the start of every test function that checks if the test is marked with
+    the no_vnodes annotation while the tests were started with a configuration that
+    has vnodes enabled. This should always be a no-op as we explicitly deselect tests
+    in pytest_collection_modifyitems that match this configuration -- but this makes it explicit :)
+    """
+    if request.node.get_marker('no_vnodes'):
+        if request.config.getoption("--use-vnodes"):
+            pytest.skip("Skipping test marked with no_vnodes as tests executed with vnodes enabled via the "
+                        "--use-vnodes command line argument")
+
+
+@pytest.fixture(scope='function', autouse=True)
+def fixture_log_test_name_and_date(request):
+    logger.info("Starting execution of %s at %s" % (request.node.name, str(datetime.now())))
+
+
+def _filter_errors(dtest_setup, errors):
+    """Filter errors, removing those that match ignore_log_patterns in the current DTestSetup"""
+    for e in errors:
+        for pattern in dtest_setup.ignore_log_patterns:
+            if re.search(pattern, repr(e)):
+                break
+        else:
+            yield e
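+# Illustrative behaviour (hypothetical values): with ignore_log_patterns set
+# to [r'Timed out'], the input ['Timed out waiting', 'NullPointerException']
+# yields only 'NullPointerException' -- the for/else yields an error only
+# when no ignore pattern matched it.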
+
+
+def check_logs_for_errors(dtest_setup):
+    all_errors = []
+    for node in dtest_setup.cluster.nodelist():
+        errors = list(_filter_errors(dtest_setup, ['\n'.join(msg) for msg in node.grep_log_for_errors()]))
+        if len(errors) != 0:
+            for error in errors:
+                if isinstance(error, (bytes, bytearray)):
+                    error_str = error.decode("utf-8").strip()
+                else:
+                    error_str = error.strip()
+
+                if error_str:
+                    logger.error("Unexpected error in {node_name} log, error: \n{error}"
+                                 .format(node_name=node.name, error=error_str))
+                    all_errors.append(error_str)
+    return all_errors
+
+
+def copy_logs(request, cluster, directory=None, name=None):
+    """Copy the current cluster's log files somewhere, by default to LOG_SAVED_DIR with a name of 'last'"""
+    log_saved_dir = "logs"
+    try:
+        os.mkdir(log_saved_dir)
+    except OSError:
+        pass
+
+    if directory is None:
+        directory = log_saved_dir
+    if name is None:
+        name = os.path.join(log_saved_dir, "last")
+    else:
+        name = os.path.join(directory, name)
+    if not os.path.exists(directory):
+        os.mkdir(directory)
+    logs = [(node.name, node.logfilename(), node.debuglogfilename(), node.gclogfilename(), node.compactionlogfilename())
+            for node in list(cluster.nodes.values())]
+    if len(logs) != 0:
+        basedir = str(int(time.time() * 1000)) + '_' + request.node.name
+        logdir = os.path.join(directory, basedir)
+        os.mkdir(logdir)
+        for n, log, debuglog, gclog, compactionlog in logs:
+            if os.path.exists(log):
+                assert os.path.getsize(log) >= 0
+                shutil.copyfile(log, os.path.join(logdir, n + ".log"))
+            if os.path.exists(debuglog):
+                assert os.path.getsize(debuglog) >= 0
+                shutil.copyfile(debuglog, os.path.join(logdir, n + "_debug.log"))
+            if os.path.exists(gclog):
+                assert os.path.getsize(gclog) >= 0
+                shutil.copyfile(gclog, os.path.join(logdir, n + "_gc.log"))
+            if os.path.exists(compactionlog):
+                assert os.path.getsize(compactionlog) >= 0
+                shutil.copyfile(compactionlog, os.path.join(logdir, n + "_compaction.log"))
+        if os.path.exists(name):
+            os.unlink(name)
+        if not is_win():
+            os.symlink(basedir, name)
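+# Resulting layout (illustrative): logs/<epoch_millis>_<test_name>/ holding
+# node1.log, node1_debug.log, node1_gc.log and node1_compaction.log, with
+# logs/last symlinked to the most recent run directory on non-Windows hosts.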
+
+
+def reset_environment_vars(initial_environment):
+    pytest_current_test = os.environ.get('PYTEST_CURRENT_TEST')
+    os.environ.clear()
+    os.environ.update(initial_environment)
+    os.environ['PYTEST_CURRENT_TEST'] = pytest_current_test
+
+
+@pytest.fixture(scope='function', autouse=False)
+def fixture_dtest_setup(request, parse_dtest_config, fixture_dtest_setup_overrides, fixture_logging_setup):
+    if running_in_docker():
+        cleanup_docker_environment_before_test_execution()
+
+    # do all of our setup operations to get the environment ready for the actual test
+    # to run (e.g. bring up a cluster with the necessary config, populate variables, etc)
+    initial_environment = copy.deepcopy(os.environ)
+    dtest_setup = DTestSetup(dtest_config=parse_dtest_config, setup_overrides=fixture_dtest_setup_overrides)
+    dtest_setup.initialize_cluster()
+
+    if not parse_dtest_config.disable_active_log_watching:
+        dtest_setup.log_watch_thread = dtest_setup.begin_active_log_watch()
+
+    # at this point we're done with our setup operations in this fixture
+    # yield to allow the actual test to run
+    yield dtest_setup
+
+    # phew! we're back after executing the test, now we need to do
+    # all of our teardown and cleanup operations
+
+    reset_environment_vars(initial_environment)
+    dtest_setup.jvm_args = []
+
+    for con in dtest_setup.connections:
+        con.cluster.shutdown()
+    dtest_setup.connections = []
+
+    failed = False
+    try:
+        if not dtest_setup.allow_log_errors:
+            errors = check_logs_for_errors(dtest_setup)
+            if len(errors) > 0:
+                failed = True
+                pytest.fail(msg='Unexpected error found in node logs (see stdout for full details). Errors: [{errors}]'
+                                     .format(errors=str.join(", ", errors)), pytrace=False)
+    finally:
+        try:
+            # save the logs for inspection
+            if failed or not parse_dtest_config.delete_logs:
+                copy_logs(request, dtest_setup.cluster)
+        except Exception as e:
+            logger.error("Error saving log:", str(e))
+        finally:
+            dtest_setup.cleanup_cluster()
+
+
+def _skip_msg(current_running_version, since_version, max_version):
+    if current_running_version < since_version:
+        return "%s < %s" % (current_running_version, since_version)
+    if max_version and current_running_version > max_version:
+        return "%s > %s" % (current_running_version, max_version)
+
+
+@pytest.fixture(autouse=True)
+def fixture_since(request, fixture_dtest_setup):
+    if request.node.get_marker('since'):
+        max_version_str = request.node.get_marker('since').kwargs.get('max_version', None)
+        max_version = None
+        if max_version_str:
+            max_version = LooseVersion(max_version_str)
+
+        since_str = request.node.get_marker('since').args[0]
+        since = LooseVersion(since_str)
+        current_running_version = fixture_dtest_setup.cluster.version()
+        skip_msg = _skip_msg(current_running_version, since, max_version)
+        if skip_msg:
+            pytest.skip(skip_msg)
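+# Example usage of the marker consumed above (hypothetical test):
+#
+#     @pytest.mark.since('3.0', max_version='3.12')
+#     def test_something(self):
+#         ...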
+
+
+@pytest.fixture(scope='session', autouse=True)
+def install_debugging_signal_handler():
+    import faulthandler
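+    # enable() installs handlers for fatal signals (SIGSEGV, SIGFPE, SIGABRT,
+    # SIGBUS, SIGILL) that dump the Python tracebacks of all threads, which
+    # helps diagnose hung or crashed dtest runs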
+    faulthandler.enable()
+
+
+@pytest.fixture(scope='function')
+def parse_dtest_config(request):
+    dtest_config = DTestConfig()
+    dtest_config.setup(request)
+
+    # if we're on mac, check that we have the required loopback interfaces before doing anything!
+    check_required_loopback_interfaces_available()
+
+    try:
+        if dtest_config.cassandra_dir is not None:
+            validate_install_dir(dtest_config.cassandra_dir)
+    except Exception as e:
+        pytest.exit("{}. Did you remember to build C*? ('ant clean jar')".format(e))
+
+    yield dtest_config
+
+
+def pytest_collection_modifyitems(items, config):
+    """
+    This function is called during the pytest test collection phase and allows for modification
+    of the test items within the list
+    """
+    if not config.getoption("--collect-only") and config.getoption("--cassandra-dir") is None:
+        if config.getoption("--cassandra-version") is None:
+            raise Exception("Required dtest arguments were missing! You must provide either --cassandra-dir "
+                            "or --cassandra-version. Refer to the documentation or invoke the help with --help.")
+
+    selected_items = []
+    deselected_items = []
+
+    sufficient_system_resources_resource_intensive = sufficient_system_resources_for_resource_intensive_tests()
+    logger.debug("has sufficient resources? %s" % sufficient_system_resources_resource_intensive)
+
+    for item in items:
+        #  set a timeout for all tests, it may be overwritten at the test level with an additional marker
+        if not item.get_marker("timeout"):
+            item.add_marker(pytest.mark.timeout(60*15))
+
+        deselect_test = False
+
+        if item.get_marker("resource_intensive"):
+            if config.getoption("--force-resource-intensive-tests"):
+                pass
+            if config.getoption("--skip-resource-intensive-tests"):
+                deselect_test = True
+                logger.info("SKIP: Deselecting test %s as test marked resource_intensive. To force execution of "
+                      "this test re-run with the --force-resource-intensive-tests command line argument" % item.name)
+            if not sufficient_system_resources_resource_intensive:
+                deselect_test = True
+                logger.info("SKIP: Deselecting resource_intensive test %s due to insufficient system resources" % item.name)
+
+        if item.get_marker("no_vnodes"):
+            if config.getoption("--use-vnodes"):
+                deselect_test = True
+                logger.info("SKIP: Deselecting test %s as the test requires vnodes to be disabled. To run this test, "
+                      "re-run without the --use-vnodes command line argument" % item.name)
+
+        if item.get_marker("vnodes"):
+            if not config.getoption("--use-vnodes"):
+                deselect_test = True
+                logger.info("SKIP: Deselecting test %s as the test requires vnodes to be enabled. To run this test, "
+                            "re-run with the --use-vnodes command line argument" % item.name)
+
+        for test_item_class in inspect.getmembers(item.module, inspect.isclass):
+            if not hasattr(test_item_class[1], "pytestmark"):
+                continue
+
+            for module_pytest_mark in test_item_class[1].pytestmark:
+                if module_pytest_mark.name == "upgrade_test":
+                    if not config.getoption("--execute-upgrade-tests"):
+                        deselect_test = True
+
+        if item.get_marker("upgrade_test"):
+            if not config.getoption("--execute-upgrade-tests"):
+                deselect_test = True
+
+        # todo kjkj: deal with no_offheap_memtables mark
+
+        if deselect_test:
+            deselected_items.append(item)
+        else:
+            selected_items.append(item)
+
+    config.hook.pytest_deselected(items=deselected_items)
+    items[:] = selected_items
+
+
+# Determine the location of the libjemalloc shared library so that we can
+# specify it through environment variables when starting Cassandra. This
+# reduces startup time, making the dtests run faster.
+def find_libjemalloc():
+    if is_win():
+        # let the normal bat script handle finding libjemalloc
+        return ""
+
+    this_dir = os.path.dirname(os.path.realpath(__file__))
+    script = os.path.join(this_dir, "findlibjemalloc.sh")
+    try:
+        p = subprocess.Popen([script], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        stdout, stderr = p.communicate()
+        if stderr or not stdout:
+            return "-"  # tells C* not to look for libjemalloc
+        else:
+            # Popen returns bytes on python3; decode and strip the trailing newline
+            return stdout.decode("utf-8").strip()
+    except Exception as exc:
+        print("Failed to run script to prelocate libjemalloc ({}): {}".format(script, exc))
+        return ""

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/consistency_test.py
----------------------------------------------------------------------
diff --git a/consistency_test.py b/consistency_test.py
index 2eaa2ca..368dba0 100644
--- a/consistency_test.py
+++ b/consistency_test.py
@@ -1,36 +1,30 @@
-import Queue
+import queue
 import sys
 import threading
 import time
+import pytest
+import logging
 from collections import OrderedDict, namedtuple
 from copy import deepcopy
 
 from cassandra import ConsistencyLevel, consistency_value_to_name
 from cassandra.query import SimpleStatement
-from nose.plugins.attrib import attr
-from nose.tools import assert_greater_equal
 
 from tools.assertions import (assert_all, assert_length_equal, assert_none,
                               assert_unavailable)
-from dtest import DISABLE_VNODES, MultiError, Tester, debug, create_ks, create_cf
+from dtest import MultiError, Tester, create_ks, create_cf
 from tools.data import (create_c1c2_table, insert_c1c2, insert_columns,
                         query_c1c2, rows_to_list)
-from tools.decorators import since
 from tools.jmxutils import JolokiaAgent, make_mbean, remove_perf_disable_shared_mem
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
 ExpectedConsistency = namedtuple('ExpectedConsistency', ('num_write_nodes', 'num_read_nodes', 'is_strong'))
 
 
 class TestHelper(Tester):
 
-    def __init__(self, *args, **kwargs):
-        Tester.__init__(self, *args, **kwargs)
-        self.lock = threading.Lock()
-
-    def log(self, message):
-        with self.lock:
-            debug(message)
-
     def _is_local(self, cl):
         return (cl == ConsistencyLevel.LOCAL_QUORUM or
                 cl == ConsistencyLevel.LOCAL_ONE or
@@ -51,12 +45,12 @@ class TestHelper(Tester):
             ConsistencyLevel.ONE: 1,
             ConsistencyLevel.TWO: 2,
             ConsistencyLevel.THREE: 3,
-            ConsistencyLevel.QUORUM: sum(rf_factors) / 2 + 1,
+            ConsistencyLevel.QUORUM: sum(rf_factors) // 2 + 1,
             ConsistencyLevel.ALL: sum(rf_factors),
-            ConsistencyLevel.LOCAL_QUORUM: rf_factors[dc] / 2 + 1,
-            ConsistencyLevel.EACH_QUORUM: rf_factors[dc] / 2 + 1,
-            ConsistencyLevel.SERIAL: sum(rf_factors) / 2 + 1,
-            ConsistencyLevel.LOCAL_SERIAL: rf_factors[dc] / 2 + 1,
+            ConsistencyLevel.LOCAL_QUORUM: rf_factors[dc] // 2 + 1,
+            ConsistencyLevel.EACH_QUORUM: rf_factors[dc] // 2 + 1,
+            ConsistencyLevel.SERIAL: sum(rf_factors) // 2 + 1,
+            ConsistencyLevel.LOCAL_SERIAL: rf_factors[dc] // 2 + 1,
             ConsistencyLevel.LOCAL_ONE: 1,
         }[cl]
 
@@ -73,7 +67,7 @@ class TestHelper(Tester):
                 :return: the data center corresponding to this node
                 """
                 dc = 0
-                for i in xrange(1, len(nodes)):
+                for i in range(1, len(nodes)):
                     if idx < sum(nodes[:i]):
                         break
                     dc += 1
@@ -101,7 +95,7 @@ class TestHelper(Tester):
         if self._is_local(cl):
             return num_nodes_alive[current] >= self._required_nodes(cl, rf_factors, current)
         elif cl == ConsistencyLevel.EACH_QUORUM:
-            for i in xrange(0, len(rf_factors)):
+            for i in range(0, len(rf_factors)):
                 if num_nodes_alive[i] < self._required_nodes(cl, rf_factors, i):
                     return False
             return True
@@ -132,7 +126,7 @@ class TestHelper(Tester):
             # StorageProxy.getLiveSortedEndpoints(), which is called by the AbstractReadExecutor
             # to determine the target replicas. The default case, a SimpleSnitch wrapped in
             # a dynamic snitch, may rarely choose a different replica.
-            debug('Changing snitch for single dc case')
+            logger.debug('Changing snitch for single dc case')
             for node in cluster.nodelist():
                 node.data_center = 'dc1'
             cluster.set_configuration_options(values={
@@ -208,7 +202,7 @@ class TestHelper(Tester):
         expected = [[userid, age]] if age else []
         ret = rows_to_list(res) == expected
         if check_ret:
-            self.assertTrue(ret, "Got {} from {}, expected {} at {}".format(rows_to_list(res), session.cluster.contact_points, expected, consistency_value_to_name(consistency)))
+            assert ret, "Got {} from {}, expected {} at {}".format(rows_to_list(res), session.cluster.contact_points, expected, consistency_value_to_name(consistency))
         return ret
 
     def create_counters_table(self, session, requires_local_reads):
@@ -233,10 +227,10 @@ class TestHelper(Tester):
         statement = SimpleStatement("SELECT * from counters WHERE id = {}".format(id), consistency_level=consistency)
         ret = rows_to_list(session.execute(statement))
         if check_ret:
-            self.assertEqual(ret[0][1], val, "Got {} from {}, expected {} at {}".format(ret[0][1],
+            assert ret[0][1] == val, "Got {} from {}, expected {} at {}".format(ret[0][1],
                                                                                         session.cluster.contact_points,
                                                                                         val,
-                                                                                        consistency_value_to_name(consistency)))
+                                                                                        consistency_value_to_name(consistency))
         return ret[0][1] if ret else 0
 
 
@@ -255,8 +249,8 @@ class TestAvailability(TestHelper):
         rf = self.rf
 
         num_alive = nodes
-        for node in xrange(nodes):
-            debug('Testing node {} in single dc with {} nodes alive'.format(node, num_alive))
+        for node in range(nodes):
+            logger.debug('Testing node {} in single dc with {} nodes alive'.format(node, num_alive))
             session = self.patient_exclusive_cql_connection(cluster.nodelist()[node], self.ksname)
             for combination in combinations:
                 self._test_insert_query_from_node(session, 0, [rf], [num_alive], *combination)
@@ -274,12 +268,12 @@ class TestAvailability(TestHelper):
         rf = self.rf
 
         nodes_alive = deepcopy(nodes)
-        rf_factors = rf.values()
+        rf_factors = list(rf.values())
 
-        for i in xrange(0, len(nodes)):  # for each dc
-            self.log('Testing dc {} with rf {} and {} nodes alive'.format(i, rf_factors[i], nodes_alive))
-            for n in xrange(nodes[i]):  # for each node in this dc
-                self.log('Testing node {} in dc {} with {} nodes alive'.format(n, i, nodes_alive))
+        for i in range(0, len(nodes)):  # for each dc
+            logger.debug('Testing dc {} with rf {} and {} nodes alive'.format(i, rf_factors[i], nodes_alive))
+            for n in range(nodes[i]):  # for each node in this dc
+                logger.debug('Testing node {} in dc {} with {} nodes alive'.format(n, i, nodes_alive))
                 node = n + sum(nodes[:i])
                 session = self.patient_exclusive_cql_connection(cluster.nodelist()[node], self.ksname)
                 for combination in combinations:
@@ -292,7 +286,7 @@ class TestAvailability(TestHelper):
         """
         Test availability for read and write via the session passed in as a parameter.
         """
-        self.log("Connected to %s for %s/%s/%s" %
+        logger.debug("Connected to %s for %s/%s/%s" %
                  (session.cluster.contact_points, consistency_value_to_name(write_cl), consistency_value_to_name(read_cl), consistency_value_to_name(serial_cl)))
 
         start = 0
@@ -300,13 +294,13 @@ class TestAvailability(TestHelper):
         age = 30
 
         if self._should_succeed(write_cl, rf_factors, num_nodes_alive, dc_idx):
-            for n in xrange(start, end):
+            for n in range(start, end):
                 self.insert_user(session, n, age, write_cl, serial_cl)
         else:
             assert_unavailable(self.insert_user, session, end, age, write_cl, serial_cl)
 
         if self._should_succeed(read_cl, rf_factors, num_nodes_alive, dc_idx):
-            for n in xrange(start, end):
+            for n in range(start, end):
                 self.query_user(session, n, age, read_cl, check_ret)
         else:
             assert_unavailable(self.query_user, session, end, age, read_cl, check_ret)
@@ -361,7 +355,7 @@ class TestAvailability(TestHelper):
 
         self._test_simple_strategy(combinations)
 
-    @attr("resource-intensive")
+    @pytest.mark.resource_intensive
     def test_network_topology_strategy(self):
         """
         Test for multiple datacenters, using network topology replication strategy.
@@ -393,7 +387,7 @@ class TestAvailability(TestHelper):
 
         self._test_network_topology_strategy(combinations)
 
-    @attr("resource-intensive")
+    @pytest.mark.resource_intensive
     @since("3.0")
     def test_network_topology_strategy_each_quorum(self):
         """
@@ -432,7 +426,7 @@ class TestAccuracy(TestHelper):
             self.read_cl = read_cl
             self.serial_cl = serial_cl
 
-            outer.log('Testing accuracy with WRITE/READ/SERIAL consistency set to {}/{}/{} (keys : {} to {})'
+            logger.debug('Testing accuracy with WRITE/READ/SERIAL consistency set to {}/{}/{} (keys : {} to {})'
                       .format(consistency_value_to_name(write_cl), consistency_value_to_name(read_cl), consistency_value_to_name(serial_cl), start, end - 1))
 
         def get_expected_consistency(self, idx):
@@ -459,12 +453,10 @@ class TestAccuracy(TestHelper):
                 for s in sessions:
                     if outer.query_user(s, n, val, read_cl, check_ret=expected_consistency.is_strong):
                         num += 1
-                assert_greater_equal(num, expected_consistency.num_write_nodes,
-                                     "Failed to read value from sufficient number of nodes,"
-                                     " required {} but got {} - [{}, {}]"
-                                     .format(expected_consistency.num_write_nodes, num, n, val))
+                assert num >= expected_consistency.num_write_nodes, ("Failed to read value from sufficient number of nodes,"
+                                     " required {} but got {} - [{}, {}]").format(expected_consistency.num_write_nodes, num, n, val)
 
-            for n in xrange(start, end):
+            for n in range(start, end):
                 age = 30
                 for s in range(0, len(sessions)):
                     outer.insert_user(sessions[s], n, age, write_cl, serial_cl)
@@ -499,12 +491,10 @@ class TestAccuracy(TestHelper):
                 for s in sessions:
                     results.append(outer.query_counter(s, n, val, read_cl, check_ret=expected_consistency.is_strong))
 
-                assert_greater_equal(results.count(val), expected_consistency.num_write_nodes,
-                                     "Failed to read value from sufficient number of nodes, required {} nodes to have a"
-                                     " counter value of {} at key {}, instead got these values: {}"
-                                     .format(expected_consistency.num_write_nodes, val, n, results))
+                assert results.count(val) >= expected_consistency.num_write_nodes, ("Failed to read value from sufficient number of nodes, required {} nodes to have a"
+                                     " counter value of {} at key {}, instead got these values: {}").format(expected_consistency.num_write_nodes, val, n, results)
 
-            for n in xrange(start, end):
+            for n in range(start, end):
                 c = 1
                 for s in range(0, len(sessions)):
                     outer.update_counter(sessions[s], n, write_cl, serial_cl)
@@ -534,15 +524,15 @@ class TestAccuracy(TestHelper):
 
         self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
 
-        input_queue = Queue.Queue()
-        exceptions_queue = Queue.Queue()
+        input_queue = queue.Queue()
+        exceptions_queue = queue.Queue()
 
         def run():
             while not input_queue.empty():
                 try:
                     v = TestAccuracy.Validation(self, self.sessions, nodes, rf_factors, *input_queue.get(block=False))
                     valid_fcn(v)
-                except Queue.Empty:
+                except queue.Empty:
                     pass
                 except Exception:
                     exceptions_queue.put(sys.exc_info())
@@ -560,17 +550,17 @@ class TestAccuracy(TestHelper):
             t.start()
             threads.append(t)
 
-        self.log("Waiting for workers to complete")
+        logger.debug("Waiting for workers to complete")
         while exceptions_queue.empty():
             time.sleep(0.1)
-            if len(filter(lambda t: t.isAlive(), threads)) == 0:
+            if len([t for t in threads if t.is_alive()]) == 0:
                 break
 
         if not exceptions_queue.empty():
-            _, exceptions, tracebacks = zip(*exceptions_queue.queue)
+            _, exceptions, tracebacks = list(zip(*exceptions_queue.queue))
             raise MultiError(exceptions=exceptions, tracebacks=tracebacks)
 
-    @attr("resource-intensive")
+    @pytest.mark.resource_intensive
     def test_simple_strategy_users(self):
         """
         Test for a single datacenter, users table, only the each quorum reads.
@@ -599,10 +589,10 @@ class TestAccuracy(TestHelper):
             (ConsistencyLevel.QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL),
         ]
 
-        self.log("Testing single dc, users")
+        logger.debug("Testing single dc, users")
         self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, [self.nodes], [self.rf], combinations)
 
-    @attr("resource-intensive")
+    @pytest.mark.resource_intensive
     @since("3.0")
     def test_simple_strategy_each_quorum_users(self):
         """
@@ -617,10 +607,10 @@ class TestAccuracy(TestHelper):
             (ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
         ]
 
-        self.log("Testing single dc, users, each quorum reads")
+        logger.debug("Testing single dc, users, each quorum reads")
         self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, [self.nodes], [self.rf], combinations)
 
-    @attr("resource-intensive")
+    @pytest.mark.resource_intensive
     def test_network_topology_strategy_users(self):
         """
         Test for multiple datacenters, users table.
@@ -653,10 +643,10 @@ class TestAccuracy(TestHelper):
             (ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.LOCAL_SERIAL),
         ]
 
-        self.log("Testing multiple dcs, users")
-        self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, self.nodes, self.rf.values(), combinations),
+        logger.debug("Testing multiple dcs, users")
+        self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, self.nodes, list(self.rf.values()), combinations),
 
-    @attr("resource-intensive")
+    @pytest.mark.resource_intensive
     @since("3.0")
     def test_network_topology_strategy_each_quorum_users(self):
         """
@@ -672,8 +662,8 @@ class TestAccuracy(TestHelper):
             (ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
         ]
 
-        self.log("Testing multiple dcs, users, each quorum reads")
-        self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, self.nodes, self.rf.values(), combinations)
+        logger.debug("Testing multiple dcs, users, each quorum reads")
+        self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, self.nodes, list(self.rf.values()), combinations)
 
     def test_simple_strategy_counters(self):
         """
@@ -700,7 +690,7 @@ class TestAccuracy(TestHelper):
             (ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
         ]
 
-        self.log("Testing single dc, counters")
+        logger.debug("Testing single dc, counters")
         self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, [self.nodes], [self.rf], combinations)
 
     @since("3.0")
@@ -718,10 +708,10 @@ class TestAccuracy(TestHelper):
             (ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
         ]
 
-        self.log("Testing single dc, counters, each quorum reads")
+        logger.debug("Testing single dc, counters, each quorum reads")
         self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, [self.nodes], [self.rf], combinations)
 
-    @attr("resource-intensive")
+    @pytest.mark.resource_intensive
     def test_network_topology_strategy_counters(self):
         """
         Test for multiple datacenters, counters table.
@@ -749,10 +739,10 @@ class TestAccuracy(TestHelper):
             (ConsistencyLevel.TWO, ConsistencyLevel.ONE),
         ]
 
-        self.log("Testing multiple dcs, counters")
-        self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, self.nodes, self.rf.values(), combinations),
+        logger.debug("Testing multiple dcs, counters")
+        self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, self.nodes, list(self.rf.values()), combinations),
 
-    @attr("resource-intensive")
+    @pytest.mark.resource_intensive
     @since("3.0")
     def test_network_topology_strategy_each_quorum_counters(self):
         """
@@ -768,8 +758,8 @@ class TestAccuracy(TestHelper):
             (ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
         ]
 
-        self.log("Testing multiple dcs, counters, each quorum reads")
-        self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, self.nodes, self.rf.values(), combinations),
+        logger.debug("Testing multiple dcs, counters, each quorum reads")
+        self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, self.nodes, list(self.rf.values()), combinations),
 
 
 class TestConsistency(Tester):
@@ -1105,7 +1095,7 @@ class TestConsistency(Tester):
         srp = make_mbean('metrics', type='Table', name='ShortReadProtectionRequests', keyspace='test', scope='test')
         with JolokiaAgent(node1) as jmx:
             # 4 srp requests for node1 and 5 for node2, total of 9
-            self.assertEqual(9, jmx.read_attribute(srp, 'Count'))
+            assert 9 == jmx.read_attribute(srp, 'Count')
 
     @since('3.0')
     def test_12872(self):
@@ -1174,12 +1164,17 @@ class TestConsistency(Tester):
                    [[0], [4]],
                    cl=ConsistencyLevel.ALL)
 
-    def short_read_test(self):
+    def test_short_read(self):
         """
         @jira_ticket CASSANDRA-9460
         """
         cluster = self.cluster
 
+        # this test causes the python driver to be extremely noisy due to
+        # frequent starting and stopping of nodes. let's move the log level
+        # of the driver to ERROR for this test only
+        logging.getLogger("cassandra").setLevel('ERROR')
+
         # Disable hinted handoff and set batch commit log so this doesn't
         # interfer with the test
         cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
@@ -1196,13 +1191,13 @@ class TestConsistency(Tester):
         reversed_key = 'reversed'
 
         # Repeat this test 10 times to make it more easy to spot a null pointer exception caused by a race, see CASSANDRA-9460
-        for k in xrange(10):
+        for k in range(10):
             # insert 9 columns in two rows
             insert_columns(self, session, normal_key, 9)
             insert_columns(self, session, reversed_key, 9)
 
             # Delete 3 first columns (and 3 last columns, for the reversed version) with a different node dead each time
-            for node, column_number_to_delete in zip(range(1, 4), range(3)):
+            for node, column_number_to_delete in zip(list(range(1, 4)), list(range(3))):
                 self.stop_node(node)
                 self.delete(node, normal_key, column_number_to_delete)
                 self.delete(node, reversed_key, 8 - column_number_to_delete)
@@ -1218,8 +1213,8 @@ class TestConsistency(Tester):
             assert_length_equal(res, 3)
 
             # value 0, 1 and 2 have been deleted
-            for i in xrange(1, 4):
-                self.assertEqual('value{}'.format(i + 2), res[i - 1][1])
+            for i in range(1, 4):
+                assert 'value{}'.format(i + 2) == res[i - 1][1]
 
             # Query 3 firsts columns in reverse order
             session = self.patient_cql_connection(node1, 'ks')
@@ -1231,12 +1226,12 @@ class TestConsistency(Tester):
             assert_length_equal(res, 3)
 
             # value 6, 7 and 8 have been deleted
-            for i in xrange(0, 3):
-                self.assertEqual('value{}'.format(5 - i), res[i][1])
+            for i in range(0, 3):
+                assert 'value{}'.format(5 - i) == res[i][1]
 
             session.execute('TRUNCATE cf')
 
-    def short_read_delete_test(self):
+    def test_short_read_delete(self):
         """ Test short reads ultimately leaving no columns alive [#4000] """
         cluster = self.cluster
 
@@ -1269,7 +1264,7 @@ class TestConsistency(Tester):
 
         assert_none(session, "SELECT c, v FROM cf WHERE key=\'k0\' LIMIT 1", cl=ConsistencyLevel.QUORUM)
 
-    def short_read_quorum_delete_test(self):
+    def test_short_read_quorum_delete(self):
         """
         @jira_ticket CASSANDRA-8933
         """
@@ -1311,11 +1306,11 @@ class TestConsistency(Tester):
         node3.stop(wait_other_notice=True)
         assert_none(session, "SELECT * FROM t WHERE id = 0 LIMIT 1", cl=ConsistencyLevel.QUORUM)
 
-    def readrepair_test(self):
+    def test_readrepair(self):
         cluster = self.cluster
         cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
 
-        if DISABLE_VNODES:
+        if not self.dtest_config.use_vnodes:
             cluster.populate(2).start()
         else:
             tokens = cluster.balanced_tokens(2)
@@ -1333,43 +1328,43 @@ class TestConsistency(Tester):
         node2.start(wait_for_binary_proto=True, wait_other_notice=True)
 
         # query everything to cause RR
-        for n in xrange(0, 10000):
+        for n in range(0, 10000):
             query_c1c2(session, n, ConsistencyLevel.QUORUM)
 
         node1.stop(wait_other_notice=True)
 
         # Check node2 for all the keys that should have been repaired
         session = self.patient_cql_connection(node2, keyspace='ks')
-        for n in xrange(0, 10000):
+        for n in range(0, 10000):
             query_c1c2(session, n, ConsistencyLevel.ONE)
 
-    def quorum_available_during_failure_test(self):
-        CL = ConsistencyLevel.QUORUM
-        RF = 3
+    def test_quorum_available_during_failure(self):
+        cl = ConsistencyLevel.QUORUM
+        rf = 3
 
-        debug("Creating a ring")
+        logger.debug("Creating a ring")
         cluster = self.cluster
-        if DISABLE_VNODES:
+        if not self.dtest_config.use_vnodes:
             cluster.populate(3).start()
         else:
             tokens = cluster.balanced_tokens(3)
             cluster.populate(3, tokens=tokens).start()
         node1, node2, node3 = cluster.nodelist()
 
-        debug("Set to talk to node 2")
+        logger.debug("Set to talk to node 2")
         session = self.patient_cql_connection(node2)
-        create_ks(session, 'ks', RF)
+        create_ks(session, 'ks', rf)
         create_c1c2_table(self, session)
 
-        debug("Generating some data")
-        insert_c1c2(session, n=100, consistency=CL)
+        logger.debug("Generating some data")
+        insert_c1c2(session, n=100, consistency=cl)
 
-        debug("Taking down node1")
+        logger.debug("Taking down node1")
         node1.stop(wait_other_notice=True)
 
-        debug("Reading back data.")
-        for n in xrange(100):
-            query_c1c2(session, n, CL)
+        logger.debug("Reading back data.")
+        for n in range(100):
+            query_c1c2(session, n, cl)
 
     def stop_node(self, node_number):
         to_stop = self.cluster.nodes["node%d" % node_number]

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/consistent_bootstrap_test.py
----------------------------------------------------------------------
diff --git a/consistent_bootstrap_test.py b/consistent_bootstrap_test.py
index ada9b39..24626f3 100644
--- a/consistent_bootstrap_test.py
+++ b/consistent_bootstrap_test.py
@@ -1,92 +1,100 @@
+import pytest
+import logging
+
 from cassandra import ConsistencyLevel
 
-from dtest import Tester, debug, create_ks
+from dtest import Tester, create_ks
 from tools.data import create_c1c2_table, insert_c1c2, query_c1c2
-from tools.decorators import no_vnodes
 from tools.misc import new_node
 
+logger = logging.getLogger(__name__)
+
 
 class TestBootstrapConsistency(Tester):
 
-    @no_vnodes()
-    def consistent_reads_after_move_test(self):
-        debug("Creating a ring")
+    @pytest.mark.no_vnodes
+    def test_consistent_reads_after_move(self):
+        logger.debug("Creating a ring")
         cluster = self.cluster
-        cluster.set_configuration_options(values={'hinted_handoff_enabled': False, 'write_request_timeout_in_ms': 60000,
-                                                  'read_request_timeout_in_ms': 60000, 'dynamic_snitch_badness_threshold': 0.0})
+        cluster.set_configuration_options(values={'hinted_handoff_enabled': False,
+                                                  'write_request_timeout_in_ms': 60000,
+                                                  'read_request_timeout_in_ms': 60000,
+                                                  'dynamic_snitch_badness_threshold': 0.0})
         cluster.set_batch_commitlog(enabled=True)
 
         cluster.populate(3, tokens=[0, 2**48, 2**62]).start()
         node1, node2, node3 = cluster.nodelist()
 
-        debug("Set to talk to node 2")
+        logger.debug("Set to talk to node 2")
         n2session = self.patient_cql_connection(node2)
         create_ks(n2session, 'ks', 2)
         create_c1c2_table(self, n2session)
 
-        debug("Generating some data for all nodes")
-        insert_c1c2(n2session, keys=range(10, 20), consistency=ConsistencyLevel.ALL)
+        logger.debug("Generating some data for all nodes")
+        insert_c1c2(n2session, keys=list(range(10, 20)), consistency=ConsistencyLevel.ALL)
 
         node1.flush()
-        debug("Taking down node1")
+        logger.debug("Taking down node1")
         node1.stop(wait_other_notice=True)
 
-        debug("Writing data to node2")
-        insert_c1c2(n2session, keys=range(30, 1000), consistency=ConsistencyLevel.ONE)
+        logger.debug("Writing data to node2")
+        insert_c1c2(n2session, keys=list(range(30, 1000)), consistency=ConsistencyLevel.ONE)
         node2.flush()
 
-        debug("Restart node1")
+        logger.debug("Restart node1")
         node1.start(wait_other_notice=True)
 
-        debug("Move token on node3")
+        logger.debug("Move token on node3")
         node3.move(2)
 
-        debug("Checking that no data was lost")
-        for n in xrange(10, 20):
+        logger.debug("Checking that no data was lost")
+        for n in range(10, 20):
             query_c1c2(n2session, n, ConsistencyLevel.ALL)
 
-        for n in xrange(30, 1000):
+        for n in range(30, 1000):
             query_c1c2(n2session, n, ConsistencyLevel.ALL)
 
-    def consistent_reads_after_bootstrap_test(self):
-        debug("Creating a ring")
+    def test_consistent_reads_after_bootstrap(self):
+        logger.debug("Creating a ring")
         cluster = self.cluster
-        cluster.set_configuration_options(values={'hinted_handoff_enabled': False, 'write_request_timeout_in_ms': 60000,
-                                                  'read_request_timeout_in_ms': 60000, 'dynamic_snitch_badness_threshold': 0.0})
+        cluster.set_configuration_options(values={'hinted_handoff_enabled': False,
+                                                  'write_request_timeout_in_ms': 60000,
+                                                  'read_request_timeout_in_ms': 60000,
+                                                  'dynamic_snitch_badness_threshold': 0.0})
         cluster.set_batch_commitlog(enabled=True)
 
         cluster.populate(2)
         node1, node2 = cluster.nodelist()
         cluster.start(wait_for_binary_proto=True, wait_other_notice=True)
 
-        debug("Set to talk to node 2")
+        logger.debug("Set to talk to node 2")
         n2session = self.patient_cql_connection(node2)
         create_ks(n2session, 'ks', 2)
         create_c1c2_table(self, n2session)
 
-        debug("Generating some data for all nodes")
-        insert_c1c2(n2session, keys=range(10, 20), consistency=ConsistencyLevel.ALL)
+        logger.debug("Generating some data for all nodes")
+        insert_c1c2(n2session, keys=list(range(10, 20)), consistency=ConsistencyLevel.ALL)
 
         node1.flush()
-        debug("Taking down node1")
+        logger.debug("Taking down node1")
         node1.stop(wait_other_notice=True)
 
-        debug("Writing data to only node2")
-        insert_c1c2(n2session, keys=range(30, 1000), consistency=ConsistencyLevel.ONE)
+        logger.debug("Writing data to only node2")
+        insert_c1c2(n2session, keys=list(range(30, 1000)), consistency=ConsistencyLevel.ONE)
         node2.flush()
 
-        debug("Restart node1")
+        logger.debug("Restart node1")
         node1.start(wait_other_notice=True)
 
-        debug("Bootstraping node3")
+        logger.debug("Bootstraping node3")
         node3 = new_node(cluster)
         node3.start(wait_for_binary_proto=True)
 
         n3session = self.patient_cql_connection(node3)
         n3session.execute("USE ks")
-        debug("Checking that no data was lost")
-        for n in xrange(10, 20):
+        logger.debug("Checking that no data was lost")
+        for n in range(10, 20):
             query_c1c2(n3session, n, ConsistencyLevel.ALL)
 
-        for n in xrange(30, 1000):
+        for n in range(30, 1000):
             query_c1c2(n3session, n, ConsistencyLevel.ALL)

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/counter_test.py
----------------------------------------------------------------------
diff --git a/counter_test.py b/counter_test.py
new file mode 100644
index 0000000..ae87d95
--- /dev/null
+++ b/counter_test.py
@@ -0,0 +1,417 @@
+import random
+import time
+import uuid
+import pytest
+import logging
+
+from cassandra import ConsistencyLevel
+from cassandra.query import SimpleStatement
+
+from tools.assertions import assert_invalid, assert_length_equal, assert_one
+from dtest import Tester, create_ks, create_cf
+from tools.data import rows_to_list
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
+
+class TestCounters(Tester):
+
+    @since('3.0', max_version='3.12')
+    def test_13691(self):
+        """
+        2.0 -> 2.1 -> 3.0 counters upgrade test
+        @jira_ticket CASSANDRA-13691
+        """
+        cluster = self.cluster
+        default_install_dir = cluster.get_install_dir()
+
+        #
+        # set up a 2.0 cluster with 3 nodes and set up schema
+        #
+
+        cluster.set_install_dir(version='2.0.17')
+        cluster.populate(3)
+        cluster.start()
+
+        node1, node2, node3 = cluster.nodelist()
+
+        session = self.patient_cql_connection(node1)
+        session.execute("""
+            CREATE KEYSPACE test
+                WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};
+            """)
+        session.execute("CREATE TABLE test.test (id int PRIMARY KEY, c counter);")
+
+        #
+        # generate some 2.0 counter columns with local shards
+        #
+
+        query = "UPDATE test.test SET c = c + 1 WHERE id = ?"
+        prepared = session.prepare(query)
+        for i in range(0, 1000):
+            session.execute(prepared, [i])
+
+        cluster.flush()
+        cluster.stop()
+
+        #
+        # upgrade cluster to 2.1
+        #
+
+        cluster.set_install_dir(version='2.1.17')
+        cluster.start()
+        cluster.nodetool("upgradesstables")
+
+        #
+        # upgrade node3 to current (3.0.x or 3.11.x)
+        #
+
+        node3.stop(wait_other_notice=True)
+        node3.set_install_dir(install_dir=default_install_dir)
+        node3.start(wait_other_notice=True)
+
+        #
+        # with a 2.1 coordinator, try to read the table with CL.ALL
+        #
+
+        session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.ALL)
+        assert_one(session, "SELECT COUNT(*) FROM test.test", [1000])
+
+    @pytest.mark.vnodes
+    def test_counter_leader_with_partial_view(self):
+        """
+        Test leader election with a starting node.
+
+        Testing that nodes do not elect as mutation leader a node with a partial view on the cluster.
+        Note that byteman rules can be syntax checked via the following command:
+            sh ./bin/bytemancheck.sh -cp ~/path_to/apache-cassandra-3.0.14-SNAPSHOT.jar ~/path_to/rule.btm
+
+        @jira_ticket CASSANDRA-13043
+        """
+        cluster = self.cluster
+
+        cluster.populate(3, use_vnodes=True, install_byteman=True)
+        nodes = cluster.nodelist()
+        # Have node 1 and 3 cheat a bit during the leader election for a counter mutation; note that cheating
+        # takes place iff there is an actual chance for node 2 to be picked.
+        if cluster.version() < '4.0':
+            nodes[0].update_startup_byteman_script('./byteman/pre4.0/election_counter_leader_favor_node2.btm')
+            nodes[2].update_startup_byteman_script('./byteman/pre4.0/election_counter_leader_favor_node2.btm')
+        else:
+            nodes[0].update_startup_byteman_script('./byteman/4.0/election_counter_leader_favor_node2.btm')
+            nodes[2].update_startup_byteman_script('./byteman/4.0/election_counter_leader_favor_node2.btm')
+
+        cluster.start(wait_for_binary_proto=True)
+        session = self.patient_cql_connection(nodes[0])
+        create_ks(session, 'ks', 3)
+        create_cf(session, 'cf', validation="CounterColumnType", columns={'c': 'counter'})
+
+        # Now stop the node and restart but first install a rule to slow down how fast node 2 will update the list
+        # nodes that are alive
+        nodes[1].stop(wait=True, wait_other_notice=False)
+        nodes[1].update_startup_byteman_script('./byteman/gossip_alive_callback_sleep.btm')
+        nodes[1].start(no_wait=True, wait_other_notice=False)
+
+        # Until node 2 is fully alive, try to force other nodes to pick it as mutation leader.
+        # If CASSANDRA-13043 is fixed, they will not. Otherwise they will, but since we are slowing down how
+        # fast node 2 updates the list of nodes that are alive, it will just have a partial view of the cluster
+        # and thus will raise an 'UnavailableException' exception.
+        nb_attempts = 50000
+        for i in range(0, nb_attempts):
+            # Change the name of the counter for the sake of randomization
+            q = SimpleStatement(
+                query_string="UPDATE ks.cf SET c = c + 1 WHERE key = 'counter_%d'" % i,
+                consistency_level=ConsistencyLevel.QUORUM
+            )
+            session.execute(q)
+
+    def test_simple_increment(self):
+        """ Simple incrementation test (Created for #3465, that wasn't a bug) """
+        cluster = self.cluster
+
+        cluster.populate(3).start()
+        nodes = cluster.nodelist()
+
+        session = self.patient_cql_connection(nodes[0])
+        create_ks(session, 'ks', 3)
+        create_cf(session, 'cf', validation="CounterColumnType", columns={'c': 'counter'})
+
+        sessions = [self.patient_cql_connection(node, 'ks') for node in nodes]
+        nb_increment = 50
+        nb_counter = 10
+
+        for i in range(0, nb_increment):
+            for c in range(0, nb_counter):
+                session = sessions[(i + c) % len(nodes)]
+                query = SimpleStatement("UPDATE cf SET c = c + 1 WHERE key = 'counter%i'" % c, consistency_level=ConsistencyLevel.QUORUM)
+                session.execute(query)
+
+            session = sessions[i % len(nodes)]
+            keys = ",".join(["'counter%i'" % c for c in range(0, nb_counter)])
+            query = SimpleStatement("SELECT key, c FROM cf WHERE key IN (%s)" % keys, consistency_level=ConsistencyLevel.QUORUM)
+            res = list(session.execute(query))
+
+            assert_length_equal(res, nb_counter)
+            for c in range(0, nb_counter):
+                assert len(res[c]) == 2, "Expecting key and counter for counter {}, got {}".format(c, str(res[c]))
+                assert res[c][1] == i + 1, "Expecting counter {} = {}, got {}".format(c, i + 1, res[c][1])
+
+    def test_upgrade(self):
+        """ Test for bug of #4436 """
+        cluster = self.cluster
+
+        cluster.populate(2).start()
+        nodes = cluster.nodelist()
+
+        session = self.patient_cql_connection(nodes[0])
+        create_ks(session, 'ks', 2)
+
+        query = """
+            CREATE TABLE counterTable (
+                k int PRIMARY KEY,
+                c counter
+            )
+        """
+        query = query + "WITH compression = { 'sstable_compression' : 'SnappyCompressor' }"
+
+        session.execute(query)
+        time.sleep(2)
+
+        keys = list(range(0, 4))
+        updates = 50
+
+        def make_updates():
+            session = self.patient_cql_connection(nodes[0], keyspace='ks')
+            upd = "UPDATE counterTable SET c = c + 1 WHERE k = %d;"
+            batch = " ".join(["BEGIN COUNTER BATCH"] + [upd % x for x in keys] + ["APPLY BATCH;"])
+
+            for i in range(0, updates):
+                query = SimpleStatement(batch, consistency_level=ConsistencyLevel.QUORUM)
+                session.execute(query)
+
+        def check(i):
+            session = self.patient_cql_connection(nodes[0], keyspace='ks')
+            query = SimpleStatement("SELECT * FROM counterTable", consistency_level=ConsistencyLevel.QUORUM)
+            rows = list(session.execute(query))
+
+            assert len(rows) == len(keys), "Expected {} rows, got {}: {}".format(len(keys), len(rows), str(rows))
+            for row in rows:
+                assert row[1] == i * updates, "Unexpected value {}".format(str(row))
+
+        def rolling_restart():
+            # Rolling restart
+            for i in range(0, 2):
+                time.sleep(.2)
+                nodes[i].nodetool("drain")
+                nodes[i].stop(wait_other_notice=False)
+                nodes[i].start(wait_other_notice=True, wait_for_binary_proto=True)
+                time.sleep(.2)
+
+        make_updates()
+        check(1)
+        rolling_restart()
+
+        make_updates()
+        check(2)
+        rolling_restart()
+
+        make_updates()
+        check(3)
+        rolling_restart()
+
+        check(3)
+
+    def test_counter_consistency(self):
+        """
+        Do a bunch of writes with ONE, read back with ALL and check results.
+        """
+        cluster = self.cluster
+        cluster.populate(3).start()
+        node1, node2, node3 = cluster.nodelist()
+        session = self.patient_cql_connection(node1)
+        create_ks(session, 'counter_tests', 3)
+
+        stmt = """
+              CREATE TABLE counter_table (
+              id uuid PRIMARY KEY,
+              counter_one COUNTER,
+              counter_two COUNTER,
+              )
+           """
+        session.execute(stmt)
+
+        counters = []
+        # establish 50 counters (2 counters x 25 rows)
+        for i in range(25):
+            _id = str(uuid.uuid4())
+            counters.append(
+                {_id: {'counter_one': 1, 'counter_two': 1}}
+            )
+
+            query = SimpleStatement("""
+                UPDATE counter_table
+                SET counter_one = counter_one + 1, counter_two = counter_two + 1
+                where id = {uuid}""".format(uuid=_id), consistency_level=ConsistencyLevel.ONE)
+            session.execute(query)
+
+        # increment a bunch of counters with CL.ONE
+        for i in range(10000):
+            counter = counters[random.randint(0, len(counters) - 1)]
+            counter_id = list(counter.keys())[0]
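+            # each entry in 'counters' is a single-key dict keyed by the row's uuid string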
+
+            query = SimpleStatement("""
+                UPDATE counter_table
+                SET counter_one = counter_one + 2
+                where id = {uuid}""".format(uuid=counter_id), consistency_level=ConsistencyLevel.ONE)
+            session.execute(query)
+
+            query = SimpleStatement("""
+                UPDATE counter_table
+                SET counter_two = counter_two + 10
+                where id = {uuid}""".format(uuid=counter_id), consistency_level=ConsistencyLevel.ONE)
+            session.execute(query)
+
+            query = SimpleStatement("""
+                UPDATE counter_table
+                SET counter_one = counter_one - 1
+                where id = {uuid}""".format(uuid=counter_id), consistency_level=ConsistencyLevel.ONE)
+            session.execute(query)
+
+            query = SimpleStatement("""
+                UPDATE counter_table
+                SET counter_two = counter_two - 5
+                where id = {uuid}""".format(uuid=counter_id), consistency_level=ConsistencyLevel.ONE)
+            session.execute(query)
+
+            # update expectations to match (assumed) db state
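+            # (net effect of the four CL.ONE updates above: 2 - 1 = +1 and 10 - 5 = +5)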
+            counter[counter_id]['counter_one'] += 1
+            counter[counter_id]['counter_two'] += 5
+
+        # let's verify the counts are correct, using CL.ALL
+        for counter_dict in counters:
+            counter_id = list(counter_dict.keys())[0]
+
+            query = SimpleStatement("""
+                SELECT counter_one, counter_two
+                FROM counter_table WHERE id = {uuid}
+                """.format(uuid=counter_id), consistency_level=ConsistencyLevel.ALL)
+            rows = list(session.execute(query))
+
+            counter_one_actual, counter_two_actual = rows[0]
+
+            assert counter_one_actual == counter_dict[counter_id]['counter_one']
+            assert counter_two_actual == counter_dict[counter_id]['counter_two']
+
+    def test_multi_counter_update(self):
+        """
+        Test for single update statements that affect multiple counters.
+        """
+        cluster = self.cluster
+        cluster.populate(3).start()
+        node1, node2, node3 = cluster.nodelist()
+        session = self.patient_cql_connection(node1)
+        create_ks(session, 'counter_tests', 3)
+
+        session.execute("""
+            CREATE TABLE counter_table (
+            id text,
+            myuuid uuid,
+            counter_one COUNTER,
+            PRIMARY KEY (id, myuuid))
+            """)
+
+        expected_counts = {}
+
+        # set up expectations
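+        # (each row's uuid maps to its increment amount, which is also the
+        #  final counter value we expect to read back)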
+        for i in range(1, 6):
+            _id = uuid.uuid4()
+
+            expected_counts[_id] = i
+
+        for k, v in list(expected_counts.items()):
+            session.execute("""
+                UPDATE counter_table set counter_one = counter_one + {v}
+                WHERE id='foo' and myuuid = {k}
+                """.format(k=k, v=v))
+
+        for k, v in list(expected_counts.items()):
+            count = list(session.execute("""
+                SELECT counter_one FROM counter_table
+                WHERE id = 'foo' and myuuid = {k}
+                """.format(k=k)))
+
+            assert v == count[0][0]
+
+    @since("2.0", max_version="3.X")
+    def test_validate_empty_column_name(self):
+        cluster = self.cluster
+        cluster.populate(1).start()
+        node1 = cluster.nodelist()[0]
+        session = self.patient_cql_connection(node1)
+        create_ks(session, 'counter_tests', 1)
+
+        session.execute("""
+            CREATE TABLE compact_counter_table (
+                pk int,
+                ck text,
+                value counter,
+                PRIMARY KEY (pk, ck))
+            WITH COMPACT STORAGE
+            """)
+
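+        # With COMPACT STORAGE the clustering value maps to the internal cell name,
+        # so an empty ck would mean an empty column name, which must be rejected.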
+        assert_invalid(session, "UPDATE compact_counter_table SET value = value + 1 WHERE pk = 0 AND ck = ''")
+        assert_invalid(session, "UPDATE compact_counter_table SET value = value - 1 WHERE pk = 0 AND ck = ''")
+
+        session.execute("UPDATE compact_counter_table SET value = value + 5 WHERE pk = 0 AND ck = 'ck'")
+        session.execute("UPDATE compact_counter_table SET value = value - 2 WHERE pk = 0 AND ck = 'ck'")
+
+        assert_one(session, "SELECT pk, ck, value FROM compact_counter_table", [0, 'ck', 3])
+
+    @since('2.0')
+    def test_drop_counter_column(self):
+        """Test for CASSANDRA-7831"""
+        cluster = self.cluster
+        cluster.populate(1).start()
+        node1, = cluster.nodelist()
+        session = self.patient_cql_connection(node1)
+        create_ks(session, 'counter_tests', 1)
+
+        session.execute("CREATE TABLE counter_bug (t int, c counter, primary key(t))")
+
+        session.execute("UPDATE counter_bug SET c = c + 1 where t = 1")
+        row = list(session.execute("SELECT * from counter_bug"))
+
+        assert rows_to_list(row)[0] == [1, 1]
+        assert len(row) == 1
+
+        session.execute("ALTER TABLE counter_bug drop c")
+
+        assert_invalid(session, "ALTER TABLE counter_bug add c counter", "Cannot re-add previously dropped counter column c")
+
+    @since("2.0", max_version="3.X") # Compact Storage
+    def test_compact_counter_cluster(self):
+        """
+        @jira_ticket CASSANDRA-12219
+        This test will fail on 3.0.0 - 3.0.8, and 3.1 - 3.8
+        """
+        cluster = self.cluster
+        cluster.populate(3).start()
+        node1 = cluster.nodelist()[0]
+        session = self.patient_cql_connection(node1)
+        create_ks(session, 'counter_tests', 1)
+
+        session.execute("""
+            CREATE TABLE IF NOT EXISTS counter_cs (
+                key bigint PRIMARY KEY,
+                data counter
+            ) WITH COMPACT STORAGE
+            """)
+
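+        # increment each of the 5 counters once per outer pass, 5 passes in total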
+        for outer in range(0, 5):
+            for idx in range(0, 5):
+                session.execute("UPDATE counter_cs SET data = data + 1 WHERE key = {k}".format(k=idx))
+
+        for idx in range(0, 5):
+            row = list(session.execute("SELECT data from counter_cs where key = {k}".format(k=idx)))
+            assert rows_to_list(row)[0][0] == 5

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/counter_tests.py
----------------------------------------------------------------------
diff --git a/counter_tests.py b/counter_tests.py
deleted file mode 100644
index 1de495d..0000000
--- a/counter_tests.py
+++ /dev/null
@@ -1,414 +0,0 @@
-import random
-import time
-import uuid
-
-from cassandra import ConsistencyLevel
-from cassandra.query import SimpleStatement
-
-from tools.assertions import assert_invalid, assert_length_equal, assert_one
-from dtest import Tester, create_ks, create_cf
-from tools.data import rows_to_list
-from tools.decorators import since
-
-
-class TestCounters(Tester):
-
-    @since('3.0', max_version='3.12')
-    def test_13691(self):
-        """
-        2.0 -> 2.1 -> 3.0 counters upgrade test
-        @jira_ticket CASSANDRA-13691
-        """
-        cluster = self.cluster
-        default_install_dir = cluster.get_install_dir()
-
-        #
-        # set up a 2.0 cluster with 3 nodes and set up schema
-        #
-
-        cluster.set_install_dir(version='2.0.17')
-        cluster.populate(3)
-        cluster.start()
-
-        node1, node2, node3 = cluster.nodelist()
-
-        session = self.patient_cql_connection(node1)
-        session.execute("""
-            CREATE KEYSPACE test
-                WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};
-            """)
-        session.execute("CREATE TABLE test.test (id int PRIMARY KEY, c counter);")
-
-        #
-        # generate some 2.0 counter columns with local shards
-        #
-
-        query = "UPDATE test.test SET c = c + 1 WHERE id = ?"
-        prepared = session.prepare(query)
-        for i in range(0, 1000):
-            session.execute(prepared, [i])
-
-        cluster.flush()
-        cluster.stop()
-
-        #
-        # upgrade cluster to 2.1
-        #
-
-        cluster.set_install_dir(version='2.1.17')
-        cluster.start()
-        cluster.nodetool("upgradesstables")
-
-        #
-        # upgrade node3 to current (3.0.x or 3.11.x)
-        #
-
-        node3.stop(wait_other_notice=True)
-        node3.set_install_dir(install_dir=default_install_dir)
-        node3.start(wait_other_notice=True)
-
-        #
-        # with a 2.1 coordinator, try to read the table with CL.ALL
-        #
-
-        session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.ALL)
-        assert_one(session, "SELECT COUNT(*) FROM test.test", [1000])
-
-    def counter_leader_with_partial_view_test(self):
-        """
-        Test leader election with a starting node.
-
-        Testing that nodes do not elect as mutation leader a node with a partial view on the cluster.
-        Note that byteman rules can be syntax checked via the following command:
-            sh ./bin/bytemancheck.sh -cp ~/path_to/apache-cassandra-3.0.14-SNAPSHOT.jar ~/path_to/rule.btm
-
-        @jira_ticket CASSANDRA-13043
-        """
-        cluster = self.cluster
-
-        cluster.populate(3, use_vnodes=True, install_byteman=True)
-        nodes = cluster.nodelist()
-        # Have node 1 and 3 cheat a bit during the leader election for a counter mutation; note that cheating
-        # takes place iff there is an actual chance for node 2 to be picked.
-        if cluster.version() < '4.0':
-            nodes[0].update_startup_byteman_script('./byteman/pre4.0/election_counter_leader_favor_node2.btm')
-            nodes[2].update_startup_byteman_script('./byteman/pre4.0/election_counter_leader_favor_node2.btm')
-        else:
-            nodes[0].update_startup_byteman_script('./byteman/4.0/election_counter_leader_favor_node2.btm')
-            nodes[2].update_startup_byteman_script('./byteman/4.0/election_counter_leader_favor_node2.btm')
-
-        cluster.start(wait_for_binary_proto=True)
-        session = self.patient_cql_connection(nodes[0])
-        create_ks(session, 'ks', 3)
-        create_cf(session, 'cf', validation="CounterColumnType", columns={'c': 'counter'})
-
-        # Now stop the node and restart but first install a rule to slow down how fast node 2 will update the list
-        # nodes that are alive
-        nodes[1].stop(wait=True, wait_other_notice=False)
-        nodes[1].update_startup_byteman_script('./byteman/gossip_alive_callback_sleep.btm')
-        nodes[1].start(no_wait=True, wait_other_notice=False)
-
-        # Until node 2 is fully alive try to force other nodes to pick him as mutation leader.
-        # If CASSANDRA-13043 is fixed, they will not. Otherwise they will do, but since we are slowing down how
-        # fast node 2 updates the list of nodes that are alive, it will just have a partial view on the cluster
-        # and thus will raise an 'UnavailableException' exception.
-        nb_attempts = 50000
-        for i in xrange(0, nb_attempts):
-            # Change the name of the counter for the sake of randomization
-            q = SimpleStatement(
-                query_string="UPDATE ks.cf SET c = c + 1 WHERE key = 'counter_%d'" % i,
-                consistency_level=ConsistencyLevel.QUORUM
-            )
-            session.execute(q)
-
-    def simple_increment_test(self):
-        """ Simple incrementation test (Created for #3465, that wasn't a bug) """
-        cluster = self.cluster
-
-        cluster.populate(3).start()
-        nodes = cluster.nodelist()
-
-        session = self.patient_cql_connection(nodes[0])
-        create_ks(session, 'ks', 3)
-        create_cf(session, 'cf', validation="CounterColumnType", columns={'c': 'counter'})
-
-        sessions = [self.patient_cql_connection(node, 'ks') for node in nodes]
-        nb_increment = 50
-        nb_counter = 10
-
-        for i in xrange(0, nb_increment):
-            for c in xrange(0, nb_counter):
-                session = sessions[(i + c) % len(nodes)]
-                query = SimpleStatement("UPDATE cf SET c = c + 1 WHERE key = 'counter%i'" % c, consistency_level=ConsistencyLevel.QUORUM)
-                session.execute(query)
-
-            session = sessions[i % len(nodes)]
-            keys = ",".join(["'counter%i'" % c for c in xrange(0, nb_counter)])
-            query = SimpleStatement("SELECT key, c FROM cf WHERE key IN (%s)" % keys, consistency_level=ConsistencyLevel.QUORUM)
-            res = list(session.execute(query))
-
-            assert_length_equal(res, nb_counter)
-            for c in xrange(0, nb_counter):
-                self.assertEqual(len(res[c]), 2, "Expecting key and counter for counter {}, got {}".format(c, str(res[c])))
-                self.assertEqual(res[c][1], i + 1, "Expecting counter {} = {}, got {}".format(c, i + 1, res[c][0]))
-
-    def upgrade_test(self):
-        """ Test for bug of #4436 """
-
-        cluster = self.cluster
-
-        cluster.populate(2).start()
-        nodes = cluster.nodelist()
-
-        session = self.patient_cql_connection(nodes[0])
-        create_ks(session, 'ks', 2)
-
-        query = """
-            CREATE TABLE counterTable (
-                k int PRIMARY KEY,
-                c counter
-            )
-        """
-        query = query + "WITH compression = { 'sstable_compression' : 'SnappyCompressor' }"
-
-        session.execute(query)
-        time.sleep(2)
-
-        keys = range(0, 4)
-        updates = 50
-
-        def make_updates():
-            session = self.patient_cql_connection(nodes[0], keyspace='ks')
-            upd = "UPDATE counterTable SET c = c + 1 WHERE k = %d;"
-            batch = " ".join(["BEGIN COUNTER BATCH"] + [upd % x for x in keys] + ["APPLY BATCH;"])
-
-            for i in range(0, updates):
-                query = SimpleStatement(batch, consistency_level=ConsistencyLevel.QUORUM)
-                session.execute(query)
-
-        def check(i):
-            session = self.patient_cql_connection(nodes[0], keyspace='ks')
-            query = SimpleStatement("SELECT * FROM counterTable", consistency_level=ConsistencyLevel.QUORUM)
-            rows = list(session.execute(query))
-
-            self.assertEqual(len(rows), len(keys), "Expected {} rows, got {}: {}".format(len(keys), len(rows), str(rows)))
-            for row in rows:
-                self.assertEqual(row[1], i * updates, "Unexpected value {}".format(str(row)))
-
-        def rolling_restart():
-            # Rolling restart
-            for i in range(0, 2):
-                time.sleep(.2)
-                nodes[i].nodetool("drain")
-                nodes[i].stop(wait_other_notice=False)
-                nodes[i].start(wait_other_notice=True, wait_for_binary_proto=True)
-                time.sleep(.2)
-
-        make_updates()
-        check(1)
-        rolling_restart()
-
-        make_updates()
-        check(2)
-        rolling_restart()
-
-        make_updates()
-        check(3)
-        rolling_restart()
-
-        check(3)
-
-    def counter_consistency_test(self):
-        """
-        Do a bunch of writes with ONE, read back with ALL and check results.
-        """
-        cluster = self.cluster
-        cluster.populate(3).start()
-        node1, node2, node3 = cluster.nodelist()
-        session = self.patient_cql_connection(node1)
-        create_ks(session, 'counter_tests', 3)
-
-        stmt = """
-              CREATE TABLE counter_table (
-              id uuid PRIMARY KEY,
-              counter_one COUNTER,
-              counter_two COUNTER,
-              )
-           """
-        session.execute(stmt)
-
-        counters = []
-        # establish 50 counters (2x25 rows)
-        for i in xrange(25):
-            _id = str(uuid.uuid4())
-            counters.append(
-                {_id: {'counter_one': 1, 'counter_two': 1}}
-            )
-
-            query = SimpleStatement("""
-                UPDATE counter_table
-                SET counter_one = counter_one + 1, counter_two = counter_two + 1
-                where id = {uuid}""".format(uuid=_id), consistency_level=ConsistencyLevel.ONE)
-            session.execute(query)
-
-        # increment a bunch of counters with CL.ONE
-        for i in xrange(10000):
-            counter = counters[random.randint(0, len(counters) - 1)]
-            counter_id = counter.keys()[0]
-
-            query = SimpleStatement("""
-                UPDATE counter_table
-                SET counter_one = counter_one + 2
-                where id = {uuid}""".format(uuid=counter_id), consistency_level=ConsistencyLevel.ONE)
-            session.execute(query)
-
-            query = SimpleStatement("""
-                UPDATE counter_table
-                SET counter_two = counter_two + 10
-                where id = {uuid}""".format(uuid=counter_id), consistency_level=ConsistencyLevel.ONE)
-            session.execute(query)
-
-            query = SimpleStatement("""
-                UPDATE counter_table
-                SET counter_one = counter_one - 1
-                where id = {uuid}""".format(uuid=counter_id), consistency_level=ConsistencyLevel.ONE)
-            session.execute(query)
-
-            query = SimpleStatement("""
-                UPDATE counter_table
-                SET counter_two = counter_two - 5
-                where id = {uuid}""".format(uuid=counter_id), consistency_level=ConsistencyLevel.ONE)
-            session.execute(query)
-
-            # update expectations to match (assumed) db state
-            counter[counter_id]['counter_one'] += 1
-            counter[counter_id]['counter_two'] += 5
-
-        # let's verify the counts are correct, using CL.ALL
-        for counter_dict in counters:
-            counter_id = counter_dict.keys()[0]
-
-            query = SimpleStatement("""
-                SELECT counter_one, counter_two
-                FROM counter_table WHERE id = {uuid}
-                """.format(uuid=counter_id), consistency_level=ConsistencyLevel.ALL)
-            rows = list(session.execute(query))
-
-            counter_one_actual, counter_two_actual = rows[0]
-
-            self.assertEqual(counter_one_actual, counter_dict[counter_id]['counter_one'])
-            self.assertEqual(counter_two_actual, counter_dict[counter_id]['counter_two'])
-
-    def multi_counter_update_test(self):
-        """
-        Test for singlular update statements that will affect multiple counters.
-        """
-        cluster = self.cluster
-        cluster.populate(3).start()
-        node1, node2, node3 = cluster.nodelist()
-        session = self.patient_cql_connection(node1)
-        create_ks(session, 'counter_tests', 3)
-
-        session.execute("""
-            CREATE TABLE counter_table (
-            id text,
-            myuuid uuid,
-            counter_one COUNTER,
-            PRIMARY KEY (id, myuuid))
-            """)
-
-        expected_counts = {}
-
-        # set up expectations
-        for i in range(1, 6):
-            _id = uuid.uuid4()
-
-            expected_counts[_id] = i
-
-        for k, v in expected_counts.items():
-            session.execute("""
-                UPDATE counter_table set counter_one = counter_one + {v}
-                WHERE id='foo' and myuuid = {k}
-                """.format(k=k, v=v))
-
-        for k, v in expected_counts.items():
-            count = list(session.execute("""
-                SELECT counter_one FROM counter_table
-                WHERE id = 'foo' and myuuid = {k}
-                """.format(k=k)))
-
-            self.assertEqual(v, count[0][0])
-
-    @since("2.0", max_version="3.X")
-    def validate_empty_column_name_test(self):
-        cluster = self.cluster
-        cluster.populate(1).start()
-        node1 = cluster.nodelist()[0]
-        session = self.patient_cql_connection(node1)
-        create_ks(session, 'counter_tests', 1)
-
-        session.execute("""
-            CREATE TABLE compact_counter_table (
-                pk int,
-                ck text,
-                value counter,
-                PRIMARY KEY (pk, ck))
-            WITH COMPACT STORAGE
-            """)
-
-        assert_invalid(session, "UPDATE compact_counter_table SET value = value + 1 WHERE pk = 0 AND ck = ''")
-        assert_invalid(session, "UPDATE compact_counter_table SET value = value - 1 WHERE pk = 0 AND ck = ''")
-
-        session.execute("UPDATE compact_counter_table SET value = value + 5 WHERE pk = 0 AND ck = 'ck'")
-        session.execute("UPDATE compact_counter_table SET value = value - 2 WHERE pk = 0 AND ck = 'ck'")
-
-        assert_one(session, "SELECT pk, ck, value FROM compact_counter_table", [0, 'ck', 3])
-
-    @since('2.0')
-    def drop_counter_column_test(self):
-        """Test for CASSANDRA-7831"""
-        cluster = self.cluster
-        cluster.populate(1).start()
-        node1, = cluster.nodelist()
-        session = self.patient_cql_connection(node1)
-        create_ks(session, 'counter_tests', 1)
-
-        session.execute("CREATE TABLE counter_bug (t int, c counter, primary key(t))")
-
-        session.execute("UPDATE counter_bug SET c = c + 1 where t = 1")
-        row = list(session.execute("SELECT * from counter_bug"))
-
-        self.assertEqual(rows_to_list(row)[0], [1, 1])
-        self.assertEqual(len(row), 1)
-
-        session.execute("ALTER TABLE counter_bug drop c")
-
-        assert_invalid(session, "ALTER TABLE counter_bug add c counter", "Cannot re-add previously dropped counter column c")
-
-    @since("2.0", max_version="3.X")  # Compact Storage
-    def compact_counter_cluster_test(self):
-        """
-        @jira_ticket CASSANDRA-12219
-        This test will fail on 3.0.0 - 3.0.8, and 3.1 - 3.8
-        """
-
-        cluster = self.cluster
-        cluster.populate(3).start()
-        node1 = cluster.nodelist()[0]
-        session = self.patient_cql_connection(node1)
-        create_ks(session, 'counter_tests', 1)
-
-        session.execute("""
-            CREATE TABLE IF NOT EXISTS counter_cs (
-                key bigint PRIMARY KEY,
-                data counter
-            ) WITH COMPACT STORAGE
-            """)
-
-        for outer in range(0, 5):
-            for idx in range(0, 5):
-                session.execute("UPDATE counter_cs SET data = data + 1 WHERE key = {k}".format(k=idx))
-
-        for idx in range(0, 5):
-            row = list(session.execute("SELECT data from counter_cs where key = {k}".format(k=idx)))
-            self.assertEqual(rows_to_list(row)[0][0], 5)

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/cql_prepared_test.py
----------------------------------------------------------------------
diff --git a/cql_prepared_test.py b/cql_prepared_test.py
index 0dfe6f0..c039b90 100644
--- a/cql_prepared_test.py
+++ b/cql_prepared_test.py
@@ -1,7 +1,11 @@
 import time
+import pytest
+import logging
 
 from dtest import Tester, create_ks
-from tools.decorators import since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 @since("1.2")
@@ -18,7 +22,7 @@ class TestCQL(Tester):
         create_ks(session, 'ks', 1)
         return session
 
-    def batch_preparation_test(self):
+    def test_batch_preparation(self):
         """ Test preparation of batch statement (#4202) """
         session = self.prepare()
 



[09/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/thrift_hsha_test.py
----------------------------------------------------------------------
diff --git a/thrift_hsha_test.py b/thrift_hsha_test.py
index 1aa8030..9e7dbf1 100644
--- a/thrift_hsha_test.py
+++ b/thrift_hsha_test.py
@@ -4,15 +4,15 @@ import shlex
 import subprocess
 import time
 import unittest
+import pytest
+import logging
 
-import pycassa
-
-from dtest import DEFAULT_DIR, Tester, debug, create_ks
-from tools.jmxutils import (JolokiaAgent, make_mbean,
-                            remove_perf_disable_shared_mem)
-
-from tools.decorators import since
+from dtest import DEFAULT_DIR, Tester, create_ks
+from thrift_test import get_thrift_client
+from tools.jmxutils import JolokiaAgent, make_mbean, remove_perf_disable_shared_mem
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 JNA_PATH = '/usr/share/java/jna.jar'
 ATTACK_JAR = 'lib/cassandra-attack.jar'
@@ -20,7 +20,7 @@ ATTACK_JAR = 'lib/cassandra-attack.jar'
 # Use jna.jar in {CASSANDRA_DIR,DEFAULT_DIR}/lib/, since >=2.1 needs correct version
 try:
     if glob.glob('%s/lib/jna-*.jar' % os.environ['CASSANDRA_DIR']):
-        debug('Using jna.jar in CASSANDRA_DIR/lib..')
+        logger.debug('Using jna.jar in CASSANDRA_DIR/lib..')
         JNA_IN_LIB = glob.glob('%s/lib/jna-*.jar' % os.environ['CASSANDRA_DIR'])
         JNA_PATH = JNA_IN_LIB[0]
 except KeyError:
@@ -31,7 +31,7 @@ except KeyError:
 
 
 @since('2.0', max_version='4')
-class ThriftHSHATest(Tester):
+class TestThriftHSHA(Tester):
 
     def test_closing_connections(self):
         """
@@ -56,26 +56,27 @@ class ThriftHSHATest(Tester):
         session.execute("CREATE TABLE \"CF\" (key text PRIMARY KEY, val text) WITH COMPACT STORAGE;")
 
         def make_connection():
-            pool = pycassa.ConnectionPool('test', timeout=None)
-            cf = pycassa.ColumnFamily(pool, 'CF')
-            return pool
+            host, port = node1.network_interfaces['thrift']
+            client = get_thrift_client(host, port)
+            client.transport.open()
+            return client
 
         pools = []
         connected_thrift_clients = make_mbean('metrics', type='Client', name='connectedThriftClients')
-        for i in xrange(10):
-            debug("Creating connection pools..")
-            for x in xrange(3):
+        for i in range(10):
+            logger.debug("Creating connection pools..")
+            for x in range(3):
                 pools.append(make_connection())
-            debug("Disabling/Enabling thrift iteration #{i}".format(i=i))
+            logger.debug("Disabling/Enabling thrift iteration #{i}".format(i=i))
             node1.nodetool('disablethrift')
             node1.nodetool('enablethrift')
-            debug("Closing connections from the client side..")
-            for pool in pools:
-                pool.dispose()
+            logger.debug("Closing connections from the client side..")
+            for client in pools:
+                client.transport.close()
 
             with JolokiaAgent(node1) as jmx:
                 num_clients = jmx.read_attribute(connected_thrift_clients, "Value")
-                self.assertEqual(int(num_clients), 0, "There are still open Thrift connections after stopping service")
+                assert int(num_clients) == 0, "There are still open Thrift connections after stopping service"
 
     @unittest.skipIf(not os.path.exists(ATTACK_JAR), "No attack jar found")
     @unittest.skipIf(not os.path.exists(JNA_PATH), "No JNA jar found")
@@ -104,7 +105,7 @@ class ThriftHSHATest(Tester):
         cluster.populate(2)
         nodes = (node1, node2) = cluster.nodelist()
         [n.start(use_jna=True) for n in nodes]
-        debug("Cluster started.")
+        logger.debug("Cluster started.")
 
         session = self.patient_cql_connection(node1)
         create_ks(session, 'tmp', 2)
@@ -117,13 +118,13 @@ class ThriftHSHATest(Tester):
 ) WITH COMPACT STORAGE;
 """)
 
-        debug("running attack jar...")
+        logger.debug("running attack jar...")
         p = subprocess.Popen(shlex.split("java -jar {attack_jar}".format(attack_jar=ATTACK_JAR)))
         p.communicate()
 
-        debug("Stopping cluster..")
+        logger.debug("Stopping cluster..")
         cluster.stop()
-        debug("Starting cluster..")
+        logger.debug("Starting cluster..")
         cluster.start(no_wait=True)
-        debug("Waiting 10 seconds before we're done..")
+        logger.debug("Waiting 10 seconds before we're done..")
         time.sleep(10)



[30/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/cql_tests.py
----------------------------------------------------------------------
diff --git a/cql_tests.py b/cql_tests.py
deleted file mode 100644
index b4ac02f..0000000
--- a/cql_tests.py
+++ /dev/null
@@ -1,1501 +0,0 @@
-# coding: utf-8
-
-import itertools
-import struct
-import time
-
-from cassandra import ConsistencyLevel, InvalidRequest
-from cassandra.metadata import NetworkTopologyStrategy, SimpleStrategy
-from cassandra.policies import FallthroughRetryPolicy
-from cassandra.protocol import ProtocolException
-from cassandra.query import SimpleStatement
-
-from dtest import ReusableClusterTester, debug, Tester, create_ks
-from distutils.version import LooseVersion
-from thrift_bindings.v22.ttypes import \
-    ConsistencyLevel as ThriftConsistencyLevel
-from thrift_bindings.v22.ttypes import (CfDef, Column, ColumnOrSuperColumn,
-                                        Mutation)
-from thrift_tests import get_thrift_client
-from tools.assertions import (assert_all, assert_invalid, assert_length_equal,
-                              assert_none, assert_one, assert_unavailable)
-from tools.data import rows_to_list
-from tools.decorators import since
-from tools.metadata_wrapper import (UpdatingClusterMetadataWrapper,
-                                    UpdatingKeyspaceMetadataWrapper,
-                                    UpdatingTableMetadataWrapper)
-
-
-class CQLTester(Tester):
-
-    def prepare(self, ordered=False, create_keyspace=True, use_cache=False,
-                nodes=1, rf=1, protocol_version=None, user=None, password=None,
-                start_rpc=False, **kwargs):
-        cluster = self.cluster
-
-        if ordered:
-            cluster.set_partitioner("org.apache.cassandra.dht.ByteOrderedPartitioner")
-
-        if use_cache:
-            cluster.set_configuration_options(values={'row_cache_size_in_mb': 100})
-
-        if start_rpc:
-            cluster.set_configuration_options(values={'start_rpc': True})
-
-        if user:
-            config = {'authenticator': 'org.apache.cassandra.auth.PasswordAuthenticator',
-                      'authorizer': 'org.apache.cassandra.auth.CassandraAuthorizer',
-                      'permissions_validity_in_ms': 0}
-            cluster.set_configuration_options(values=config)
-
-        if not cluster.nodelist():
-            cluster.populate(nodes).start(wait_for_binary_proto=True)
-        node1 = cluster.nodelist()[0]
-
-        session = self.patient_cql_connection(node1, protocol_version=protocol_version, user=user, password=password)
-        if create_keyspace:
-            create_ks(session, 'ks', rf)
-        return session
-
-
-class StorageProxyCQLTester(CQLTester):
-    """
-    Each CQL statement is exercised at least once in order to
-    ensure we execute the code path in StorageProxy.
-    # TODO This probably isn't true anymore?
-    Note that in depth CQL validation is done in Java unit tests,
-    see CASSANDRA-9160.
-
-    # TODO I'm not convinced we need these. Seems like all the functionality
-    #      is covered in greater detail in other test classes.
-    """
-
-    def keyspace_test(self):
-        """
-        Smoke test that basic keyspace operations work:
-
-        - create a keyspace
-        - assert keyspace exists and is configured as expected with the driver metadata API
-        - ALTER it
-        - assert keyspace was correctly altered with the driver metadata API
-        - DROP it
-        - assert keyspace is no longer in keyspace metadata
-        """
-        session = self.prepare(create_keyspace=False)
-        meta = UpdatingClusterMetadataWrapper(session.cluster)
-
-        self.assertNotIn('ks', meta.keyspaces)
-        session.execute("CREATE KEYSPACE ks WITH replication = "
-                        "{ 'class':'SimpleStrategy', 'replication_factor':1} "
-                        "AND DURABLE_WRITES = true")
-        self.assertIn('ks', meta.keyspaces)
-
-        ks_meta = UpdatingKeyspaceMetadataWrapper(session.cluster, ks_name='ks')
-        self.assertTrue(ks_meta.durable_writes)
-        self.assertIsInstance(ks_meta.replication_strategy, SimpleStrategy)
-
-        session.execute("ALTER KEYSPACE ks WITH replication = "
-                        "{ 'class' : 'NetworkTopologyStrategy', 'datacenter1' : 1 } "
-                        "AND DURABLE_WRITES = false")
-        self.assertFalse(ks_meta.durable_writes)
-        self.assertIsInstance(ks_meta.replication_strategy, NetworkTopologyStrategy)
-
-        session.execute("DROP KEYSPACE ks")
-        self.assertNotIn('ks', meta.keyspaces)
-
-    def table_test(self):
-        """
-        Smoke test that basic table operations work:
-
-        - create a table
-        - ALTER the table adding a column
-        - insert 10 values
-        - SELECT * and assert the values are there
-        - TRUNCATE the table
-        - SELECT * and assert there are no values
-        - DROP the table
-        - SELECT * and assert the statement raises an InvalidRequest
-        # TODO run SELECTs to make sure each statement works
-        """
-        session = self.prepare()
-
-        ks_meta = UpdatingKeyspaceMetadataWrapper(session.cluster, ks_name='ks')
-
-        session.execute("CREATE TABLE test1 (k int PRIMARY KEY, v1 int)")
-        self.assertIn('test1', ks_meta.tables)
-
-        t1_meta = UpdatingTableMetadataWrapper(session.cluster, ks_name='ks', table_name='test1')
-
-        session.execute("ALTER TABLE test1 ADD v2 int")
-        self.assertIn('v2', t1_meta.columns)
-
-        for i in range(0, 10):
-            session.execute("INSERT INTO test1 (k, v1, v2) VALUES ({i}, {i}, {i})".format(i=i))
-
-        assert_all(session, "SELECT * FROM test1", [[i, i, i] for i in range(0, 10)], ignore_order=True)
-
-        session.execute("TRUNCATE test1")
-
-        assert_none(session, "SELECT * FROM test1")
-
-        session.execute("DROP TABLE test1")
-        self.assertNotIn('test1', ks_meta.tables)
-
-    @since("2.0", max_version="3.X")
-    def table_test_compact_storage(self):
-        """
-        Smoke test that basic table operations work:
-
-        - create a table with COMPACT STORAGE
-        - insert 10 values
-        - SELECT * and assert the values are there
-        - TRUNCATE the table
-        - SELECT * and assert there are no values
-        - DROP the table
-        - SELECT * and assert the statement raises an InvalidRequest
-        # TODO run SELECTs to make sure each statement works
-        """
-        session = self.prepare()
-
-        ks_meta = UpdatingKeyspaceMetadataWrapper(session.cluster, ks_name='ks')
-
-        session.execute("CREATE TABLE test2 (k int, c1 int, v1 int, PRIMARY KEY (k, c1)) WITH COMPACT STORAGE")
-        self.assertIn('test2', ks_meta.tables)
-
-        for i in range(0, 10):
-            session.execute("INSERT INTO test2 (k, c1, v1) VALUES ({i}, {i}, {i})".format(i=i))
-
-        assert_all(session, "SELECT * FROM test2", [[i, i, i] for i in range(0, 10)], ignore_order=True)
-
-        session.execute("TRUNCATE test2")
-
-        assert_none(session, "SELECT * FROM test2")
-
-        session.execute("DROP TABLE test2")
-        self.assertNotIn('test2', ks_meta.tables)
-
-    def index_test(self):
-        """
-        Smoke test CQL statements related to indexes:
-
-        - CREATE a table
-        - CREATE an index on that table
-        - INSERT 10 values into the table
-        - SELECT from the table over the indexed value and assert the expected values come back
-        - drop the index
-        - assert SELECTing over the indexed value raises an InvalidRequest
-        # TODO run SELECTs to make sure each statement works
-        """
-        session = self.prepare()
-
-        session.execute("CREATE TABLE test3 (k int PRIMARY KEY, v1 int, v2 int)")
-        table_meta = UpdatingTableMetadataWrapper(session.cluster, ks_name='ks', table_name='test3')
-        session.execute("CREATE INDEX testidx ON test3 (v1)")
-        self.assertIn('testidx', table_meta.indexes)
-
-        for i in range(0, 10):
-            session.execute("INSERT INTO test3 (k, v1, v2) VALUES ({i}, {i}, {i})".format(i=i))
-
-        assert_one(session, "SELECT * FROM test3 WHERE v1 = 0", [0, 0, 0])
-
-        session.execute("DROP INDEX testidx")
-        self.assertNotIn('testidx', table_meta.indexes)
-
-    def type_test(self):
-        """
-        Smoke test basic TYPE operations:
-
-        - CREATE a type
-        - CREATE a table using that type
-        - ALTER the type and CREATE another table
-        - DROP the tables and type
-        - CREATE another table using the DROPped type and assert it fails with an InvalidRequest
-        # TODO run SELECTs to make sure each statement works
-        # TODO is this even necessary given the existence of the auth_tests?
-        """
-        session = self.prepare()
-        # even though we only ever use the user_types attribute of this object,
-        # we have to access it each time, because attribute access is how the
-        # value is updated
-        ks_meta = UpdatingKeyspaceMetadataWrapper(session.cluster, ks_name='ks')
-
-        session.execute("CREATE TYPE address_t (street text, city text, zip_code int)")
-        self.assertIn('address_t', ks_meta.user_types)
-
-        session.execute("CREATE TABLE test4 (id int PRIMARY KEY, address frozen<address_t>)")
-
-        session.execute("ALTER TYPE address_t ADD phones set<text>")
-        self.assertIn('phones', ks_meta.user_types['address_t'].field_names)
-
-        # drop the table so we can safely drop the type it uses
-        session.execute("DROP TABLE test4")
-
-        session.execute("DROP TYPE address_t")
-        self.assertNotIn('address_t', ks_meta.user_types)
-
-    def user_test(self):
-        """
-        Smoke test for basic USER queries:
-
-        - get a session as the default superuser
-        - CREATE a user
-        - ALTER that user by giving it a different password
-        - DROP that user
-        # TODO list users after each to make sure each statement works
-        """
-        session = self.prepare(user='cassandra', password='cassandra')
-        node1 = self.cluster.nodelist()[0]
-
-        def get_usernames():
-            return [user.name for user in session.execute('LIST USERS')]
-
-        self.assertNotIn('user1', get_usernames())
-
-        session.execute("CREATE USER user1 WITH PASSWORD 'secret'")
-        # use patient to retry until it works, because it takes some time for
-        # the CREATE to take
-        self.patient_cql_connection(node1, user='user1', password='secret')
-
-        session.execute("ALTER USER user1 WITH PASSWORD 'secret^2'")
-        # use patient for same reason as above
-        self.patient_cql_connection(node1, user='user1', password='secret^2')
-
-        session.execute("DROP USER user1")
-        self.assertNotIn('user1', get_usernames())
-
-    def statements_test(self):
-        """
-        Smoke test SELECT and UPDATE statements:
-
-        - create a table
-        - insert 20 rows into the table
-        - run SELECT COUNT queries and assert they return the correct values
-            - bare and with IN and equality conditions
-        - run SELECT * queries with = conditions
-        - run UPDATE queries
-        - SELECT * and assert the UPDATEd values are there
-        - DELETE with a = condition
-        - SELECT the deleted values and make sure nothing is returned
-        # TODO run SELECTs to make sure each statement works
-        """
-        session = self.prepare()
-
-        session.execute("CREATE TABLE test7 (kind text, time int, v1 int, v2 int, PRIMARY KEY(kind, time) )")
-
-        for i in range(0, 10):
-            session.execute("INSERT INTO test7 (kind, time, v1, v2) VALUES ('ev1', {i}, {i}, {i})".format(i=i))
-            session.execute("INSERT INTO test7 (kind, time, v1, v2) VALUES ('ev2', {i}, {i}, {i})".format(i=i))
-
-        assert_one(session, "SELECT COUNT(*) FROM test7 WHERE kind = 'ev1'", [10])
-
-        assert_one(session, "SELECT COUNT(*) FROM test7 WHERE kind IN ('ev1', 'ev2')", [20])
-
-        assert_one(session, "SELECT COUNT(*) FROM test7 WHERE kind IN ('ev1', 'ev2') AND time=0", [2])
-
-        assert_all(session, "SELECT * FROM test7 WHERE kind = 'ev1'", [['ev1', i, i, i] for i in range(0, 10)])
-
-        assert_all(session, "SELECT * FROM test7 WHERE kind = 'ev2'", [['ev2', i, i, i] for i in range(0, 10)])
-
-        for i in range(0, 10):
-            session.execute("UPDATE test7 SET v1 = 0, v2 = 0 where kind = 'ev1' AND time={i}".format(i=i))
-
-        assert_all(session, "SELECT * FROM test7 WHERE kind = 'ev1'", [['ev1', i, 0, 0] for i in range(0, 10)])
-
-        session.execute("DELETE FROM test7 WHERE kind = 'ev1'")
-        assert_none(session, "SELECT * FROM test7 WHERE kind = 'ev1'")
-
-        assert_one(session, "SELECT COUNT(*) FROM test7 WHERE kind = 'ev1'", [0])
-
-    @since('3.10')
-    def partition_key_allow_filtering_test(self):
-        """
-        Filtering with unrestricted parts of partition keys
-
-        @jira_ticket CASSANDRA-11031
-        """
-        session = self.prepare()
-
-        session.execute("""
-            CREATE TABLE IF NOT EXISTS test_filter (
-                k1 int,
-                k2 int,
-                ck1 int,
-                v int,
-                PRIMARY KEY ((k1, k2), ck1)
-            )
-        """)
-
-        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 0, 0, 0)")
-        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 0, 1, 0)")
-        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 0, 2, 0)")
-        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 0, 3, 0)")
-        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 1, 0, 0)")
-        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 1, 1, 0)")
-        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 1, 2, 0)")
-        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (0, 1, 3, 0)")
-        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 0, 0, 0)")
-        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 0, 1, 0)")
-        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 0, 2, 0)")
-        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 0, 3, 0)")
-        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 1, 0, 0)")
-        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 1, 1, 0)")
-        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 1, 2, 0)")
-        session.execute("INSERT INTO test_filter (k1, k2, ck1, v) VALUES (1, 1, 3, 0)")
-
-        # select test
-        assert_all(session,
-                   "SELECT * FROM test_filter WHERE k1 = 0 ALLOW FILTERING",
-                   [[0, 0, 0, 0],
-                    [0, 0, 1, 0],
-                    [0, 0, 2, 0],
-                    [0, 0, 3, 0],
-                    [0, 1, 0, 0],
-                    [0, 1, 1, 0],
-                    [0, 1, 2, 0],
-                    [0, 1, 3, 0]],
-                   ignore_order=True)
-
-        assert_all(session,
-                   "SELECT * FROM test_filter WHERE k1 <= 1 AND k2 >= 1 ALLOW FILTERING",
-                   [[0, 1, 0, 0],
-                    [0, 1, 1, 0],
-                    [0, 1, 2, 0],
-                    [0, 1, 3, 0],
-                    [1, 1, 0, 0],
-                    [1, 1, 1, 0],
-                    [1, 1, 2, 0],
-                    [1, 1, 3, 0]],
-                   ignore_order=True)
-
-        assert_none(session, "SELECT * FROM test_filter WHERE k1 = 2 ALLOW FILTERING")
-        assert_none(session, "SELECT * FROM test_filter WHERE k1 <=0 AND k2 > 1 ALLOW FILTERING")
-
-        assert_all(session,
-                   "SELECT * FROM test_filter WHERE k2 <= 0 ALLOW FILTERING",
-                   [[0, 0, 0, 0],
-                    [0, 0, 1, 0],
-                    [0, 0, 2, 0],
-                    [0, 0, 3, 0],
-                    [1, 0, 0, 0],
-                    [1, 0, 1, 0],
-                    [1, 0, 2, 0],
-                    [1, 0, 3, 0]],
-                   ignore_order=True)
-
-        assert_all(session,
-                   "SELECT * FROM test_filter WHERE k1 <= 0 AND k2 = 0 ALLOW FILTERING",
-                   [[0, 0, 0, 0],
-                    [0, 0, 1, 0],
-                    [0, 0, 2, 0],
-                    [0, 0, 3, 0]])
-
-        assert_all(session,
-                   "SELECT * FROM test_filter WHERE k2 = 1 ALLOW FILTERING",
-                   [[0, 1, 0, 0],
-                    [0, 1, 1, 0],
-                    [0, 1, 2, 0],
-                    [0, 1, 3, 0],
-                    [1, 1, 0, 0],
-                    [1, 1, 1, 0],
-                    [1, 1, 2, 0],
-                    [1, 1, 3, 0]],
-                   ignore_order=True)
-
-        assert_none(session, "SELECT * FROM test_filter WHERE k2 = 2 ALLOW FILTERING")
-
-        # filtering on both Partition Key and Clustering key
-        assert_all(session,
-                   "SELECT * FROM test_filter WHERE k1 = 0 AND ck1=0 ALLOW FILTERING",
-                   [[0, 0, 0, 0],
-                    [0, 1, 0, 0]],
-                   ignore_order=True)
-
-        assert_all(session,
-                   "SELECT * FROM test_filter WHERE k1 = 0 AND k2=1 AND ck1=0 ALLOW FILTERING",
-                   [[0, 1, 0, 0]])
-
-        # count(*) test
-        assert_all(session,
-                   "SELECT count(*) FROM test_filter WHERE k2 = 0 ALLOW FILTERING",
-                   [[8]])
-
-        assert_all(session,
-                   "SELECT count(*) FROM test_filter WHERE k2 = 1 ALLOW FILTERING",
-                   [[8]])
-
-        assert_all(session,
-                   "SELECT count(*) FROM test_filter WHERE k2 = 2 ALLOW FILTERING",
-                   [[0]])
-
-        # test invalid query
-        with self.assertRaises(InvalidRequest):
-            session.execute("SELECT * FROM test_filter WHERE k1 = 0")
-
-        with self.assertRaises(InvalidRequest):
-            session.execute("SELECT * FROM test_filter WHERE k1 = 0 AND k2 > 0")
-
-        with self.assertRaises(InvalidRequest):
-            session.execute("SELECT * FROM test_filter WHERE k1 >= 0 AND k2 in (0,1,2)")
-
-        with self.assertRaises(InvalidRequest):
-            session.execute("SELECT * FROM test_filter WHERE k2 > 0")
-
-    def batch_test(self):
-        """
-        Smoke test for BATCH statements:
-
-        - CREATE a table
-        - create a BATCH statement and execute it at QUORUM
-        # TODO run SELECTs to make sure each statement works
-        """
-        session = self.prepare()
-
-        session.execute("""
-            CREATE TABLE test8 (
-                userid text PRIMARY KEY,
-                name text,
-                password text
-            )
-        """)
-
-        query = SimpleStatement("""
-            BEGIN BATCH
-                INSERT INTO test8 (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user');
-                UPDATE test8 SET password = 'ps22dhds' WHERE userid = 'user3';
-                INSERT INTO test8 (userid, password) VALUES ('user4', 'ch@ngem3c');
-                DELETE name FROM test8 WHERE userid = 'user1';
-            APPLY BATCH;
-        """, consistency_level=ConsistencyLevel.QUORUM)
-        session.execute(query)
-
-
-class MiscellaneousCQLTester(CQLTester):
-    """
-    CQL tests that cannot be performed as Java unit tests, see CASSANDRA-9160.
-    If you're considering adding a test here, consider writing Java unit tests
-    for CQL validation instead. Add a new test here only if there is a reason
-    for it, e.g. the test is related to the client protocol or thrift, requires
-    examining the log files, or must run on multiple nodes.
-    """
-
-    @since('2.1', max_version='3.0')
-    def large_collection_errors_test(self):
-        """
-        Assert C* logs warnings when selecting too large a collection over
-        protocol v2:
-
-        - prepare the cluster and connect using protocol v2
-        - CREATE a table containing a map column
-        - insert over 65535 elements into the map
-        - select all the elements of the map
-        - assert that the correct error was logged
-        """
-
-        # We only warn with protocol 2
-        session = self.prepare(protocol_version=2)
-
-        cluster = self.cluster
-        node1 = cluster.nodelist()[0]
-        self.ignore_log_patterns = ["Detected collection for table"]
-
-        session.execute("""
-            CREATE TABLE maps (
-                userid text PRIMARY KEY,
-                properties map<int, text>
-            );
-        """)
-
-        # Insert more than the max, which is 65535
-        for i in range(70000):
-            session.execute("UPDATE maps SET properties[{}] = 'x' WHERE userid = 'user'".format(i))
-
-        # Query for the data and throw exception
-        session.execute("SELECT properties FROM maps WHERE userid = 'user'")
-        node1.watch_log_for("Detected collection for table ks.maps with 70000 elements, more than the 65535 limit. "
-                            "Only the first 65535 elements will be returned to the client. Please see "
-                            "http://cassandra.apache.org/doc/cql3/CQL.html#collections for more details.")
-
-    @since('2.0', max_version='4')
-    def cql3_insert_thrift_test(self):
-        """
-        Check that we can insert from thrift into a CQL3 table:
-
-        - CREATE a table via CQL
-        - insert values via thrift
-        - SELECT the inserted values and assert they are there as expected
-
-        @jira_ticket CASSANDRA-4377
-        """
-        session = self.prepare(start_rpc=True)
-
-        session.execute("""
-            CREATE TABLE test (
-                k int,
-                c int,
-                v int,
-                PRIMARY KEY (k, c)
-            )
-        """)
-
-        node = self.cluster.nodelist()[0]
-        host, port = node.network_interfaces['thrift']
-        client = get_thrift_client(host, port)
-        client.transport.open()
-        client.set_keyspace('ks')
-        key = struct.pack('>i', 2)
-        column_name_component = struct.pack('>i', 4)
-        # component length + component + EOC + component length + component + EOC
-        column_name = '\x00\x04' + column_name_component + '\x00' + '\x00\x01' + 'v' + '\x00'
-        value = struct.pack('>i', 8)
-        client.batch_mutate(
-            {key: {'test': [Mutation(ColumnOrSuperColumn(column=Column(name=column_name, value=value, timestamp=100)))]}},
-            ThriftConsistencyLevel.ONE)
-
-        assert_one(session, "SELECT * FROM test", [2, 4, 8])
-
-    @since('2.0', max_version='4')
-    def rename_test(self):
-        """
-        Check that a thrift-created table can be renamed via CQL:
-
-        - create a table via the thrift interface
-        - INSERT a row via CQL
-        - ALTER the name of the table via CQL
-        - SELECT from the table and assert the values inserted are there
-        """
-        session = self.prepare(start_rpc=True)
-
-        node = self.cluster.nodelist()[0]
-        host, port = node.network_interfaces['thrift']
-        client = get_thrift_client(host, port)
-        client.transport.open()
-
-        cfdef = CfDef()
-        cfdef.keyspace = 'ks'
-        cfdef.name = 'test'
-        cfdef.column_type = 'Standard'
-        cfdef.comparator_type = 'CompositeType(Int32Type, Int32Type, Int32Type)'
-        cfdef.key_validation_class = 'UTF8Type'
-        cfdef.default_validation_class = 'UTF8Type'
-
-        client.set_keyspace('ks')
-        client.system_add_column_family(cfdef)
-
-        session.execute("INSERT INTO ks.test (key, column1, column2, column3, value) VALUES ('foo', 4, 3, 2, 'bar')")
-        session.execute("ALTER TABLE test RENAME column1 TO foo1 AND column2 TO foo2 AND column3 TO foo3")
-        assert_one(session, "SELECT foo1, foo2, foo3 FROM test", [4, 3, 2])
-
-    def invalid_string_literals_test(self):
-        """
-        @jira_ticket CASSANDRA-8101
-
-        - assert INSERTing into a nonexistent table fails normally, with an InvalidRequest exception
-        - create a table with ascii and text columns
-        - assert that trying to execute an insert statement with non-UTF8 contents raises a ProtocolException
-            - tries to insert into a nonexistent column to make sure the ProtocolException is raised over other errors
-        """
-        session = self.prepare()
-        # this should fail as normal, not with a ProtocolException
-        assert_invalid(session, u"insert into invalid_string_literals (k, a) VALUES (0, '\u038E\u0394\u03B4\u03E0')")
-
-        session = self.patient_cql_connection(self.cluster.nodelist()[0], keyspace='ks')
-        session.execute("create table invalid_string_literals (k int primary key, a ascii, b text)")
-
-        # this should still fail with an InvalidRequest
-        assert_invalid(session, u"insert into invalid_string_literals (k, c) VALUES (0, '\u038E\u0394\u03B4\u03E0')")
-        # but since the protocol requires strings to be valid UTF-8, the error
-        # response to this is a ProtocolException, not an error about the
-        # nonexistent column
-        with self.assertRaisesRegexp(ProtocolException, 'Cannot decode string as UTF8'):
-            session.execute("insert into invalid_string_literals (k, c) VALUES (0, '\xc2\x01')")
-
-    def prepared_statement_invalidation_test(self):
-        """
-        @jira_ticket CASSANDRA-7910
-
-        - CREATE a table and INSERT a row
-        - prepare 2 prepared SELECT statements
-        - SELECT the row with a bound prepared statement and assert it returns the expected row
-        - ALTER the table, dropping a column
-        - assert prepared statement without that column in it still works
-        - assert prepared statement containing that column fails
-        - ALTER the table, adding a column
-        - assert prepared statement without that column in it still works
-        - assert prepared statement containing that column also still works
-        - ALTER the table, changing the type of a column
-        - assert that both prepared statements still work
-        """
-        session = self.prepare()
-
-        session.execute("CREATE TABLE test (k int PRIMARY KEY, a int, b int, c int)")
-        session.execute("INSERT INTO test (k, a, b, c) VALUES (0, 0, 0, 0)")
-
-        wildcard_prepared = session.prepare("SELECT * FROM test")
-        explicit_prepared = session.prepare("SELECT k, a, b, c FROM test")
-        result = session.execute(wildcard_prepared.bind(None))
-        self.assertEqual(result, [(0, 0, 0, 0)])
-
-        session.execute("ALTER TABLE test DROP c")
-        result = session.execute(wildcard_prepared.bind(None))
-        # wildcard select can be automatically re-prepared by the driver
-        self.assertEqual(result, [(0, 0, 0)])
-        # but re-preparing the statement with explicit columns should fail
-        # (see PYTHON-207 for why we expect InvalidRequestException instead of the normal exc)
-        assert_invalid(session, explicit_prepared.bind(None), expected=InvalidRequest)
-
-        session.execute("ALTER TABLE test ADD d int")
-        result = session.execute(wildcard_prepared.bind(None))
-        self.assertEqual(result, [(0, 0, 0, None)])
-
-        if self.cluster.version() < LooseVersion('3.0'):
-            explicit_prepared = session.prepare("SELECT k, a, b, d FROM test")
-
-            # when the type is altered, both statements will need to be re-prepared
-            # by the driver, but the re-preparation should succeed
-            session.execute("ALTER TABLE test ALTER d TYPE blob")
-            result = session.execute(wildcard_prepared.bind(None))
-            self.assertEqual(result, [(0, 0, 0, None)])
-
-            result = session.execute(explicit_prepared.bind(None))
-            self.assertEqual(result, [(0, 0, 0, None)])
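# The asymmetry above comes from result metadata: the driver can transparently
# re-prepare the wildcard SELECT (its column set is resolved at re-preparation),
# while the explicit statement still names the dropped column, so its
# re-preparation fails server-side (see PYTHON-207).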
-
-    def range_slice_test(self):
-        """
-        Regression test for CASSANDRA-1337:
-
-        - CREATE a table
-        - INSERT 2 rows
-        - SELECT * from the table
-        - assert 2 rows were returned
-
-        @jira_ticket CASSANDRA-1337
-        # TODO I don't see how this is an interesting test or how it tests 1337.
-        """
-
-        cluster = self.cluster
-
-        cluster.populate(2).start()
-        node1 = cluster.nodelist()[0]
-        time.sleep(0.2)
-
-        session = self.patient_cql_connection(node1)
-        create_ks(session, 'ks', 1)
-
-        session.execute("""
-            CREATE TABLE test (
-                k text PRIMARY KEY,
-                v int
-            );
-        """)
-        time.sleep(1)
-
-        session.execute("INSERT INTO test (k, v) VALUES ('foo', 0)")
-        session.execute("INSERT INTO test (k, v) VALUES ('bar', 1)")
-
-        res = list(session.execute("SELECT * FROM test"))
-        self.assertEqual(len(res), 2, msg=res)
-
-    def many_columns_test(self):
-        """
-        Test for tables with thousands of columns.
-
-        @jira_ticket CASSANDRA-11621
-        """
-
-        session = self.prepare()
-        width = 5000
-        cluster = self.cluster
-
-        session.execute("CREATE TABLE very_wide_table (pk int PRIMARY KEY, " +
-                        ",".join(map(lambda i: "c_{} int".format(i), range(width))) +
-                        ")")
-
-        session.execute("INSERT INTO very_wide_table (pk, " +
-                        ",".join(map(lambda i: "c_{}".format(i), range(width))) +
-                        ") VALUES (100," +
-                        ",".join(map(lambda i: str(i), range(width))) +
-                        ")")
-
-        assert_all(session, "SELECT " +
-                   ",".join(map(lambda i: "c_{}".format(i), range(width))) +
-                   " FROM very_wide_table", [[i for i in range(width)]])
-
-    @since("3.11", max_version="3.X")
-    def drop_compact_storage_flag_test(self):
-        """
-        Verify that the schema change from DROP COMPACT STORAGE is
-        distributed to the other nodes.
-
-        @jira_ticket CASSANDRA-10857
-        """
-
-        cluster = self.cluster
-
-        cluster.populate(3).start()
-        node1, node2, node3 = cluster.nodelist()
-
-        session1 = self.patient_cql_connection(node1)
-        session2 = self.patient_cql_connection(node2)
-        session3 = self.patient_cql_connection(node3)
-        create_ks(session1, 'ks', 3)
-        sessions = [session1, session2, session3]
-
-        for session in sessions:
-            session.set_keyspace('ks')
-
-        session1.execute("""
-            CREATE TABLE test_drop_compact_storage (k int PRIMARY KEY, s1 int) WITH COMPACT STORAGE;
-        """)
-
-        session1.execute("INSERT INTO test_drop_compact_storage (k, s1) VALUES (1,1)")
-        session1.execute("INSERT INTO test_drop_compact_storage (k, s1) VALUES (2,2)")
-        session1.execute("INSERT INTO test_drop_compact_storage (k, s1) VALUES (3,3)")
-
-        for session in sessions:
-            res = session.execute("SELECT * from test_drop_compact_storage")
-            self.assertEqual(rows_to_list(res), [[1, 1],
-                                                 [2, 2],
-                                                 [3, 3]])
-
-        session1.execute("ALTER TABLE test_drop_compact_storage DROP COMPACT STORAGE")
-
-        for session in sessions:
-            assert_all(session, "SELECT * from test_drop_compact_storage",
-                       [[1, None, 1, None],
-                        [2, None, 2, None],
-                        [3, None, 3, None]])
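# Why each row gains two NULL cells above: dropping COMPACT STORAGE exposes
# the compact table's previously hidden clustering and value columns (named
# column1 and value by default -- an assumption for this sketch), so a row
# written as (k=1, s1=1) reads back as:
#     [1, None, 1, None]   # k, column1 (hidden clustering), s1, value (hidden)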
-
-
-@since('3.2')
-class AbortedQueryTester(CQLTester):
-    """
-    @jira_ticket CASSANDRA-7392
-
-    Test that read-queries that take longer than read_request_timeout_in_ms
-    time out.
-
-    # TODO The important part of these is "set up a combination of
-    #      configuration options that will make all reads time out, then
-    #      try to read and assert it times out". This can probably be made much
-    #      simpler -- most of the logic can be factored out. In many cases it
-    #      probably isn't even necessary to define a custom table or to insert
-    #      more than one value.
-    """
-
-    def local_query_test(self):
-        """
-        Check that a query running on the local coordinator node times out:
-
-        - set the read request timeouts to 1 second
-        - start the cluster with read_iteration_delay set to 5 ms
-            - the delay will be applied to each row iterated and will cause
-              read queries to take longer than the read timeout
-        - CREATE and INSERT into a table
-        - SELECT * from the table using a retry policy that never retries, and assert it times out
-
-        @jira_ticket CASSANDRA-7392
-        """
-        cluster = self.cluster
-        cluster.set_configuration_options(values={'request_timeout_in_ms': 1000,
-                                                  'read_request_timeout_in_ms': 1000,
-                                                  'range_request_timeout_in_ms': 1000})
-
-        # cassandra.test.read_iteration_delay_ms causes the state tracking read iterators
-        # introduced by CASSANDRA-7392 to pause by the specified amount of milliseconds every
-        # CQL row iterated for non system queries, so that these queries take much longer to complete,
-        # see ReadCommand.withStateTracking()
-        cluster.populate(1).start(wait_for_binary_proto=True,
-                                  jvm_args=["-Dcassandra.monitoring_report_interval_ms=10",
-                                            "-Dcassandra.test.read_iteration_delay_ms=5"])
-        node = cluster.nodelist()[0]
-        session = self.patient_cql_connection(node)
-
-        create_ks(session, 'ks', 1)
-        session.execute("""
-            CREATE TABLE test1 (
-                id int PRIMARY KEY,
-                val text
-            );
-        """)
-
-        for i in range(500):
-            session.execute("INSERT INTO test1 (id, val) VALUES ({}, 'foo')".format(i))
-
-        # use debug logs because at info level no-spam logger has unpredictable results
-        mark = node.mark_log(filename='debug.log')
-        statement = SimpleStatement("SELECT * from test1",
-                                    consistency_level=ConsistencyLevel.ONE,
-                                    retry_policy=FallthroughRetryPolicy())
-        assert_unavailable(lambda c: debug(c.execute(statement)), session)
-        node.watch_log_for("operations timed out", filename='debug.log', from_mark=mark, timeout=60)
-
-    def remote_query_test(self):
-        """
-        Check that a query running on a node other than the coordinator times out:
-
-        - populate the cluster with 2 nodes
-        - set the read request timeouts to 1 second
-        - start one node without having it join the ring
-        - start the other node with read_iteration_delay set to 5 ms
-            - the delay will be applied to each row iterated and will cause
-              read queries to take longer than the read timeout
-        - CREATE a table
-        - INSERT 5000 rows via a session connected to the node that is not a member of the ring
-        - run SELECT statements and assert they fail
-        # TODO refactor SELECT statements:
-        #        - run the statements in a loop to reduce duplication
-        #        - watch the log after each query
-        #        - assert we raise the right error
-        """
-        cluster = self.cluster
-        cluster.set_configuration_options(values={'request_timeout_in_ms': 1000,
-                                                  'read_request_timeout_in_ms': 1000,
-                                                  'range_request_timeout_in_ms': 1000})
-
-        cluster.populate(2)
-        node1, node2 = cluster.nodelist()
-
-        node1.start(wait_for_binary_proto=True, join_ring=False)  # ensure other node executes queries
-        node2.start(wait_for_binary_proto=True,
-                    jvm_args=["-Dcassandra.monitoring_report_interval_ms=10",
-                              "-Dcassandra.test.read_iteration_delay_ms=5"])  # see above for explanation
-
-        session = self.patient_exclusive_cql_connection(node1)
-
-        create_ks(session, 'ks', 1)
-        session.execute("""
-            CREATE TABLE test2 (
-                id int,
-                col int,
-                val text,
-                PRIMARY KEY(id, col)
-            );
-        """)
-
-        for i, j in itertools.product(range(10), range(500)):
-            session.execute("INSERT INTO test2 (id, col, val) VALUES ({}, {}, 'foo')".format(i, j))
-
-        # use debug logs because at info level no-spam logger has unpredictable results
-        mark = node2.mark_log(filename='debug.log')
-
-        statement = SimpleStatement("SELECT * from test2",
-                                    consistency_level=ConsistencyLevel.ONE,
-                                    retry_policy=FallthroughRetryPolicy())
-        assert_unavailable(lambda c: debug(c.execute(statement)), session)
-
-        statement = SimpleStatement("SELECT * from test2 where id = 1",
-                                    consistency_level=ConsistencyLevel.ONE,
-                                    retry_policy=FallthroughRetryPolicy())
-        assert_unavailable(lambda c: debug(c.execute(statement)), session)
-
-        statement = SimpleStatement("SELECT * from test2 where id IN (1, 2, 3) AND col > 10",
-                                    consistency_level=ConsistencyLevel.ONE,
-                                    retry_policy=FallthroughRetryPolicy())
-        assert_unavailable(lambda c: debug(c.execute(statement)), session)
-
-        statement = SimpleStatement("SELECT * from test2 where col > 5 ALLOW FILTERING",
-                                    consistency_level=ConsistencyLevel.ONE,
-                                    retry_policy=FallthroughRetryPolicy())
-        assert_unavailable(lambda c: debug(c.execute(statement)), session)
-
-        node2.watch_log_for("operations timed out", filename='debug.log', from_mark=mark, timeout=60)
-
-    def index_query_test(self):
-        """
-        Check that a secondary index query times out:
-
-        - populate a 1-node cluster
-        - set the read request timeouts to 1 second
-        - start the node with read_iteration_delay set to 5 ms
-            - the delay will be applied to each row iterated and will cause
-              read queries to take longer than the read timeout
-        - CREATE a table
-        - CREATE an index on the table
-        - INSERT 500 values into the table
-        - SELECT over the table and assert it times out
-        """
-        cluster = self.cluster
-        cluster.set_configuration_options(values={'request_timeout_in_ms': 1000,
-                                                  'read_request_timeout_in_ms': 1000,
-                                                  'range_request_timeout_in_ms': 1000})
-
-        cluster.populate(1).start(wait_for_binary_proto=True,
-                                  jvm_args=["-Dcassandra.monitoring_report_interval_ms=10",
-                                            "-Dcassandra.test.read_iteration_delay_ms=5"])  # see above for explanation
-        node = cluster.nodelist()[0]
-        session = self.patient_cql_connection(node)
-
-        create_ks(session, 'ks', 1)
-        session.execute("""
-            CREATE TABLE test3 (
-                id int PRIMARY KEY,
-                col int,
-                val text
-            );
-        """)
-
-        session.execute("CREATE INDEX ON test3 (col)")
-
-        for i in range(500):
-            session.execute("INSERT INTO test3 (id, col, val) VALUES ({}, 50, 'foo')".format(i))
-
-        # use debug logs because at info level no-spam logger has unpredictable results
-        mark = node.mark_log(filename='debug.log')
-        statement = session.prepare("SELECT * from test3 WHERE col = ? ALLOW FILTERING")
-        statement.consistency_level = ConsistencyLevel.ONE
-        statement.retry_policy = FallthroughRetryPolicy()
-        assert_unavailable(lambda c: debug(c.execute(statement, [50])), session)
-        node.watch_log_for("operations timed out", filename='debug.log', from_mark=mark, timeout=60)
-
-    def materialized_view_test(self):
-        """
-        Check that a materialized view query times out:
-
-        - populate a 2-node cluster
-        - set the read request timeouts to 1 second
-        - start one node without having it join the ring
-        - start the other node with read_iteration_delay set to 5 ms
-            - the delay will be applied to each row iterated and will cause
-              read queries to take longer than the read timeout
-        - CREATE a table
-        - CREATE a materialized view over that table
-        - INSERT 500 values into the base table
-        - assert querying the view results in an unavailable exception
-        """
-        cluster = self.cluster
-        cluster.set_configuration_options(values={'request_timeout_in_ms': 1000,
-                                                  'read_request_timeout_in_ms': 1000,
-                                                  'range_request_timeout_in_ms': 1000})
-
-        cluster.populate(2)
-        node1, node2 = cluster.nodelist()
-
-        node1.start(wait_for_binary_proto=True, join_ring=False)  # ensure other node executes queries
-        node2.start(wait_for_binary_proto=True,
-                    jvm_args=["-Dcassandra.monitoring_report_interval_ms=10",
-                              "-Dcassandra.test.read_iteration_delay_ms=5"])  # see above for explanation
-
-        session = self.patient_exclusive_cql_connection(node1)
-
-        create_ks(session, 'ks', 1)
-        session.execute("""
-            CREATE TABLE test4 (
-                id int PRIMARY KEY,
-                col int,
-                val text
-            );
-        """)
-
-        session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM test4 "
-                         "WHERE col IS NOT NULL AND id IS NOT NULL PRIMARY KEY (col, id)"))
-
-        for i in range(500):
-            session.execute("INSERT INTO test4 (id, col, val) VALUES ({}, 50, 'foo')".format(i))
-
-        # use debug logs because at info level no-spam logger has unpredictable results
-        mark = node2.mark_log(filename='debug.log')
-        statement = SimpleStatement("SELECT * FROM mv WHERE col = 50",
-                                    consistency_level=ConsistencyLevel.ONE,
-                                    retry_policy=FallthroughRetryPolicy())
-
-        assert_unavailable(lambda c: debug(c.execute(statement)), session)
-        node2.watch_log_for("operations timed out", filename='debug.log', from_mark=mark, timeout=60)
-
-
-@since('3.10')
-class SlowQueryTester(CQLTester):
-    """
-    Test slow query logging.
-
-    @jira_ticket CASSANDRA-12403
-    """
-    def local_query_test(self):
-        """
-        Check that a query running locally on the coordinator is reported as slow:
-
-        - start a one node cluster with slow_query_log_timeout_in_ms set to a small value
-          and the read request timeouts set to a large value (to ensure the query is not aborted) and
-          read_iteration_delay set to a value big enough for the query to exceed slow_query_log_timeout_in_ms
-          (this will cause read queries to take longer than the slow query timeout)
-        - CREATE and INSERT into a table
-        - SELECT * from the table using a retry policy that never retries, and check that the slow
-          query log messages are present in the debug logs (we cannot check the logs at info level because
-          the no spam logger has unpredictable results)
-
-        @jira_ticket CASSANDRA-12403
-        """
-        cluster = self.cluster
-        cluster.set_configuration_options(values={'slow_query_log_timeout_in_ms': 10,
-                                                  'request_timeout_in_ms': 120000,
-                                                  'read_request_timeout_in_ms': 120000,
-                                                  'range_request_timeout_in_ms': 120000})
-
-        # cassandra.test.read_iteration_delay_ms causes the state tracking read iterators
-        # introduced by CASSANDRA-7392 to pause by the specified amount of milliseconds during each
-        # iteration of non system queries, so that these queries take much longer to complete,
-        # see ReadCommand.withStateTracking()
-        cluster.populate(1).start(wait_for_binary_proto=True,
-                                  jvm_args=["-Dcassandra.monitoring_report_interval_ms=10",
-                                            "-Dcassandra.test.read_iteration_delay_ms=1"])
-        node = cluster.nodelist()[0]
-        session = self.patient_cql_connection(node)
-
-        create_ks(session, 'ks', 1)
-        session.execute("""
-            CREATE TABLE test1 (
-                id int,
-                col int,
-                val text,
-                PRIMARY KEY(id, col)
-            );
-        """)
-
-        for i in range(100):
-            session.execute("INSERT INTO test1 (id, col, val) VALUES (1, {}, 'foo')".format(i))
-
-        # only check debug logs because at INFO level the no-spam logger has unpredictable results
-        mark = node.mark_log(filename='debug.log')
-
-        session.execute(SimpleStatement("SELECT * from test1",
-                                        consistency_level=ConsistencyLevel.ONE,
-                                        retry_policy=FallthroughRetryPolicy()))
-
-        node.watch_log_for(["operations were slow", "SELECT \* FROM ks.test1"],
-                           from_mark=mark, filename='debug.log', timeout=60)
-        mark = node.mark_log(filename='debug.log')
-
-        session.execute(SimpleStatement("SELECT * from test1 where id = 1",
-                                        consistency_level=ConsistencyLevel.ONE,
-                                        retry_policy=FallthroughRetryPolicy()))
-
-        node.watch_log_for(["operations were slow", "SELECT \* FROM ks.test1"],
-                           from_mark=mark, filename='debug.log', timeout=60)
-        mark = node.mark_log(filename='debug.log')
-
-        session.execute(SimpleStatement("SELECT * from test1 where id = 1",
-                                        consistency_level=ConsistencyLevel.ONE,
-                                        retry_policy=FallthroughRetryPolicy()))
-
-        node.watch_log_for(["operations were slow", "SELECT \* FROM ks.test1"],
-                           from_mark=mark, filename='debug.log', timeout=60)
-        mark = node.mark_log(filename='debug.log')
-
-        session.execute(SimpleStatement("SELECT * from test1 where token(id) < 0",
-                                        consistency_level=ConsistencyLevel.ONE,
-                                        retry_policy=FallthroughRetryPolicy()))
-
-        node.watch_log_for(["operations were slow", "SELECT \* FROM ks.test1"],
-                           from_mark=mark, filename='debug.log', timeout=60)
-
-    def remote_query_test(self):
-        """
-        Check that a query running on a node other than the coordinator is reported as slow:
-
-        - populate the cluster with 2 nodes
-        - start one node without having it join the ring
-        - start the other node with slow_query_log_timeout_in_ms set to a small value
-          and the read request timeouts set to a large value (to ensure the query is not aborted) and
-          read_iteration_delay set to a value big enough for the query to exceed slow_query_log_timeout_in_ms
-          (this will cause read queries to take longer than the slow query timeout)
-        - CREATE a table
-        - INSERT 1000 rows via a session connected to the node that is not a member of the ring
-        - run SELECT statements and check that the slow query messages are present in the debug logs
-          (we cannot check the logs at info level because the no spam logger has unpredictable results)
-
-        @jira_ticket CASSANDRA-12403
-        """
-        cluster = self.cluster
-        cluster.set_configuration_options(values={'slow_query_log_timeout_in_ms': 10,
-                                                  'request_timeout_in_ms': 120000,
-                                                  'read_request_timeout_in_ms': 120000,
-                                                  'range_request_timeout_in_ms': 120000})
-
-        cluster.populate(2)
-        node1, node2 = cluster.nodelist()
-
-        node1.start(wait_for_binary_proto=True, join_ring=False)  # ensure other node executes queries
-        node2.start(wait_for_binary_proto=True,
-                    jvm_args=["-Dcassandra.monitoring_report_interval_ms=10",
-                              "-Dcassandra.test.read_iteration_delay_ms=1"])  # see above for explanation
-
-        session = self.patient_exclusive_cql_connection(node1)
-
-        create_ks(session, 'ks', 1)
-        session.execute("""
-            CREATE TABLE test2 (
-                id int,
-                col int,
-                val text,
-                PRIMARY KEY(id, col)
-            );
-        """)
-
-        for i, j in itertools.product(range(100), range(10)):
-            session.execute("INSERT INTO test2 (id, col, val) VALUES ({}, {}, 'foo')".format(i, j))
-
-        # only check debug logs because at INFO level the no-spam logger has unpredictable results
-        mark = node2.mark_log(filename='debug.log')
-        session.execute(SimpleStatement("SELECT * from test2",
-                                        consistency_level=ConsistencyLevel.ONE,
-                                        retry_policy=FallthroughRetryPolicy()))
-
-        node2.watch_log_for(["operations were slow", "SELECT \* FROM ks.test2"],
-                            from_mark=mark, filename='debug.log', timeout=60)
-        mark = node2.mark_log(filename='debug.log')
-
-        session.execute(SimpleStatement("SELECT * from test2 where id = 1",
-                                        consistency_level=ConsistencyLevel.ONE,
-                                        retry_policy=FallthroughRetryPolicy()))
-
-        node2.watch_log_for(["operations were slow", "SELECT \* FROM ks.test2"],
-                            from_mark=mark, filename='debug.log', timeout=60)
-        mark = node2.mark_log(filename='debug.log')
-
-        session.execute(SimpleStatement("SELECT * from test2 where id = 1",
-                                        consistency_level=ConsistencyLevel.ONE,
-                                        retry_policy=FallthroughRetryPolicy()))
-
-        node2.watch_log_for(["operations were slow", "SELECT \* FROM ks.test2"],
-                            from_mark=mark, filename='debug.log', timeout=60)
-        mark = node2.mark_log(filename='debug.log')
-
-        session.execute(SimpleStatement("SELECT * from test2 where token(id) < 0",
-                                        consistency_level=ConsistencyLevel.ONE,
-                                        retry_policy=FallthroughRetryPolicy()))
-
-        node2.watch_log_for(["operations were slow", "SELECT \* FROM ks.test2"],
-                            from_mark=mark, filename='debug.log', timeout=60)
-
-    def disable_slow_query_log_test(self):
-        """
-        Check that a query is NOT reported as slow if slow query logging is disabled.
-
-        - start a one node cluster with slow_query_log_timeout_in_ms set to 0 milliseconds
-          (this will disable slow query logging), the read request timeouts set to a large value
-          (to ensure queries are not aborted) and read_iteration_delay set to 1 millisecond
-          (this will cause read queries to take longer than usual)
-        - CREATE and INSERT into a table
-        - SELECT * from the table using a retry policy that never retries, and check that no slow
-          query log messages appear in the logs
-
-        @jira_ticket CASSANDRA-12403
-        """
-        cluster = self.cluster
-        cluster.set_configuration_options(values={'slow_query_log_timeout_in_ms': 0,
-                                                  'request_timeout_in_ms': 120000,
-                                                  'read_request_timeout_in_ms': 120000,
-                                                  'range_request_timeout_in_ms': 120000})
-
-        # cassandra.test.read_iteration_delay_ms causes the state tracking read iterators
-        # introduced by CASSANDRA-7392 to pause by the specified amount of milliseconds during each
-        # iteration of non system queries, so that these queries take much longer to complete,
-        # see ReadCommand.withStateTracking()
-        cluster.populate(1).start(wait_for_binary_proto=True,
-                                  jvm_args=["-Dcassandra.monitoring_report_interval_ms=10",
-                                            "-Dcassandra.test.read_iteration_delay_ms=1"])
-        node = cluster.nodelist()[0]
-        session = self.patient_cql_connection(node)
-
-        create_ks(session, 'ks', 1)
-        session.execute("""
-            CREATE TABLE test3 (
-                id int PRIMARY KEY,
-                val text
-            );
-        """)
-
-        for i in range(100):
-            session.execute("INSERT INTO test3 (id, val) VALUES ({}, 'foo')".format(i))
-
-        session.execute(SimpleStatement("SELECT * from test3",
-                                        consistency_level=ConsistencyLevel.ONE,
-                                        retry_policy=FallthroughRetryPolicy()))
-
-        time.sleep(1)  # do our best to ensure logs had a chance to appear
-
-        self._check_logs(node, "SELECT \* FROM ks.test3", 'debug.log', 0)
-
-    def _check_logs(self, node, pattern, filename, num_expected):
-        ret = node.grep_log(pattern, filename=filename)
-        assert_length_equal(ret, num_expected)
-
-
-class LWTTester(ReusableClusterTester):
-    """
-    Validate CQL queries for LWTs for static columns for null and non-existing rows
-    @jira_ticket CASSANDRA-9842
-    """
-
-    @classmethod
-    def post_initialize_cluster(cls):
-        cluster = cls.cluster
-        cluster.populate(3)
-        cluster.start(wait_for_binary_proto=True)
-
-    def get_lwttester_session(self):
-        node1 = self.cluster.nodelist()[0]
-        session = self.patient_cql_connection(node1)
-        session.execute("""CREATE KEYSPACE IF NOT EXISTS ks WITH REPLICATION={'class':'SimpleStrategy',
-            'replication_factor':1}""")
-        session.execute("USE ks")
-        return session
-
-    def lwt_with_static_columns_test(self):
-        session = self.get_lwttester_session()
-
-        session.execute("""
-            CREATE TABLE lwt_with_static (a int, b int, s int static, d text, PRIMARY KEY (a, b))
-        """)
-
-        assert_one(session, "UPDATE lwt_with_static SET s = 1 WHERE a = 1 IF s = NULL", [True])
-
-        assert_one(session, "SELECT * FROM lwt_with_static", [1, None, 1, None])
-
-        assert_one(session, "UPDATE lwt_with_static SET s = 2 WHERE a = 2 IF EXISTS", [False])
-
-        assert_one(session, "SELECT * FROM lwt_with_static WHERE a = 1", [1, None, 1, None])
-
-        assert_one(session, "INSERT INTO lwt_with_static (a, s) VALUES (2, 2) IF NOT EXISTS", [True])
-
-        assert_one(session, "SELECT * FROM lwt_with_static WHERE a = 2", [2, None, 2, None])
-
-        assert_one(session, "BEGIN BATCH\n" +
-                   "INSERT INTO lwt_with_static (a, b, d) values (3, 3, 'a');\n" +
-                   "UPDATE lwt_with_static SET s = 3 WHERE a = 3 IF s = null;\n" +
-                   "APPLY BATCH;", [True])
-
-        assert_one(session, "SELECT * FROM lwt_with_static WHERE a = 3", [3, 3, 3, "a"])
-
-        # the LWT condition is evaluated against the pre-batch state, before the INSERT takes effect
-        assert_one(session, "BEGIN BATCH\n" +
-                   "INSERT INTO lwt_with_static (a, b, d) values (4, 4, 'a');\n" +
-                   "UPDATE lwt_with_static SET s = 4 WHERE a = 4 IF s = null;\n" +
-                   "APPLY BATCH;", [True])
-
-        assert_one(session, "SELECT * FROM lwt_with_static WHERE a = 4", [4, 4, 4, "a"])
-
-    def _validate_non_existing_or_null_values(self, table_name, session):
-        assert_one(session, "UPDATE {} SET s = 1 WHERE a = 1 IF s = NULL".format(table_name), [True])
-
-        assert_one(session, "SELECT a, s, d FROM {} WHERE a = 1".format(table_name), [1, 1, None])
-
-        assert_one(session, "UPDATE {} SET s = 2 WHERE a = 2 IF s IN (10,20,NULL)".format(table_name), [True])
-
-        assert_one(session, "SELECT a, s, d FROM {} WHERE a = 2".format(table_name), [2, 2, None])
-
-        assert_one(session, "UPDATE {} SET s = 4 WHERE a = 4 IF s != 4".format(table_name), [True])
-
-        assert_one(session, "SELECT a, s, d FROM {} WHERE a = 4".format(table_name), [4, 4, None])
-
-    def _is_new_lwt_format_version(self, version):
-        return version > LooseVersion('3.9') or (version > LooseVersion('3.0.9') and version < LooseVersion('3.1'))
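# Illustration of the intended cutoffs:
#   _is_new_lwt_format_version(LooseVersion('3.10'))   -> True   (3.10 and later)
#   _is_new_lwt_format_version(LooseVersion('3.0.10')) -> True   (3.0.10+ backport)
#   _is_new_lwt_format_version(LooseVersion('3.9'))    -> False
# On the "new" versions a failed condition returns the richer
# [applied, current values...] row that the assertions below branch on.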
-
-    def conditional_updates_on_static_columns_with_null_values_test(self):
-        session = self.get_lwttester_session()
-
-        table_name = "conditional_updates_on_static_columns_with_null"
-        session.execute("""
-            CREATE TABLE {} (a int, b int, s int static, d text, PRIMARY KEY (a, b))
-        """.format(table_name))
-
-        for i in range(1, 6):
-            session.execute("INSERT INTO {} (a, b) VALUES ({}, {})".format(table_name, i, i))
-
-        self._validate_non_existing_or_null_values(table_name, session)
-
-        assert_one(session, "UPDATE {} SET s = 30 WHERE a = 3 IF s IN (10,20,30)".format(table_name),
-                   [False, None] if self._is_new_lwt_format_version(self.cluster.version()) else [False])
-
-        assert_one(session, "SELECT * FROM {} WHERE a = 3".format(table_name), [3, 3, None, None])
-
-        for operator in [">", "<", ">=", "<=", "="]:
-            assert_one(session, "UPDATE {} SET s = 50 WHERE a = 5 IF s {} 3".format(table_name, operator),
-                       [False, None] if self._is_new_lwt_format_version(self.cluster.version()) else [False])
-
-            assert_one(session, "SELECT * FROM {} WHERE a = 5".format(table_name), [5, 5, None, None])
-
-    def conditional_updates_on_static_columns_with_non_existing_values_test(self):
-        session = self.get_lwttester_session()
-
-        table_name = "conditional_updates_on_static_columns_with_ne"
-        session.execute("""
-            CREATE TABLE {} (a int, b int, s int static, d text, PRIMARY KEY (a, b))
-        """.format(table_name))
-
-        self._validate_non_existing_or_null_values(table_name, session)
-
-        assert_one(session, "UPDATE {} SET s = 30 WHERE a = 3 IF s IN (10,20,30)".format(table_name), [False])
-
-        assert_none(session, "SELECT * FROM {} WHERE a = 3".format(table_name))
-
-        for operator in [">", "<", ">=", "<=", "="]:
-            assert_one(session, "UPDATE {} SET s = 50 WHERE a = 5 IF s {} 3".format(table_name, operator), [False])
-
-            assert_none(session, "SELECT * FROM {} WHERE a = 5".format(table_name))
-
-    def _validate_non_existing_or_null_values_batch(self, table_name, session):
-        assert_one(session, """
-            BEGIN BATCH
-                INSERT INTO {table_name} (a, b, d) values (2, 2, 'a');
-                UPDATE {table_name} SET s = 2 WHERE a = 2 IF s = null;
-            APPLY BATCH""".format(table_name=table_name), [True])
-
-        assert_one(session, "SELECT * FROM {table_name} WHERE a = 2".format(table_name=table_name), [2, 2, 2, "a"])
-
-        assert_one(session, """
-            BEGIN BATCH
-                INSERT INTO {table_name} (a, b, s, d) values (4, 4, 4, 'a')
-                UPDATE {table_name} SET s = 5 WHERE a = 4 IF s = null;
-            APPLY BATCH""".format(table_name=table_name), [True])
-
-        assert_one(session, "SELECT * FROM {table_name} WHERE a = 4".format(table_name=table_name), [4, 4, 5, "a"])
-
-        assert_one(session, """
-            BEGIN BATCH
-                INSERT INTO {table_name} (a, b, s, d) values (5, 5, 5, 'a')
-                UPDATE {table_name} SET s = 6 WHERE a = 5 IF s IN (1,2,null)
-            APPLY BATCH""".format(table_name=table_name), [True])
-
-        assert_one(session, "SELECT * FROM {table_name} WHERE a = 5".format(table_name=table_name), [5, 5, 6, "a"])
-
-        assert_one(session, """
-            BEGIN BATCH
-                INSERT INTO {table_name} (a, b, s, d) values (7, 7, 7, 'a')
-                UPDATE {table_name} SET s = 8 WHERE a = 7 IF s != 7;
-            APPLY BATCH""".format(table_name=table_name), [True])
-
-        assert_one(session, "SELECT * FROM {table_name} WHERE a = 7".format(table_name=table_name), [7, 7, 8, "a"])
-
-    def conditional_updates_on_static_columns_with_null_values_batch_test(self):
-        session = self.get_lwttester_session()
-
-        table_name = "lwt_on_static_columns_with_null_batch"
-        session.execute("""
-            CREATE TABLE {table_name} (a int, b int, s int static, d text, PRIMARY KEY (a, b))
-        """.format(table_name=table_name))
-
-        for i in range(1, 7):
-            session.execute("INSERT INTO {table_name} (a, b) VALUES ({i}, {i})".format(table_name=table_name, i=i))
-
-        self._validate_non_existing_or_null_values_batch(table_name, session)
-
-        for operator in [">", "<", ">=", "<=", "="]:
-            assert_one(session, """
-                BEGIN BATCH
-                    INSERT INTO {table_name} (a, b, s, d) values (3, 3, 40, 'a')
-                    UPDATE {table_name} SET s = 30 WHERE a = 3 IF s {operator} 5;
-                APPLY BATCH""".format(table_name=table_name, operator=operator),
-                       [False, 3, 3, None] if self._is_new_lwt_format_version(self.cluster.version()) else [False])
-
-            assert_one(session, "SELECT * FROM {table_name} WHERE a = 3".format(table_name=table_name), [3, 3, None, None])
-
-        assert_one(session, """
-                BEGIN BATCH
-                    INSERT INTO {table_name} (a, b, s, d) values (6, 6, 70, 'a')
-                    UPDATE {table_name} SET s = 60 WHERE a = 6 IF s IN (1,2,3)
-                APPLY BATCH""".format(table_name=table_name),
-                   [False, 6, 6, None] if self._is_new_lwt_format_version(self.cluster.version()) else [False])
-
-        assert_one(session, "SELECT * FROM {table_name} WHERE a = 6".format(table_name=table_name), [6, 6, None, None])
-
-    def conditional_deletes_on_static_columns_with_null_values_test(self):
-        session = self.get_lwttester_session()
-
-        table_name = "conditional_deletes_on_static_with_null"
-        session.execute("""
-            CREATE TABLE {} (a int, b int, s1 int static, s2 int static, v int, PRIMARY KEY (a, b))
-        """.format(table_name))
-
-        for i in range(1, 6):
-            session.execute("INSERT INTO {} (a, b, s1, s2, v) VALUES ({}, {}, {}, null, {})".format(table_name, i, i, i, i))
-
-        assert_one(session, "DELETE s1 FROM {} WHERE a = 1 IF s2 = null".format(table_name), [True])
-
-        assert_one(session, "SELECT * FROM {} WHERE a = 1".format(table_name), [1, 1, None, None, 1])
-
-        assert_one(session, "DELETE s1 FROM {} WHERE a = 2 IF s2 IN (10,20,30)".format(table_name), [False, None])
-
-        assert_one(session, "SELECT * FROM {} WHERE a = 2".format(table_name), [2, 2, 2, None, 2])
-
-        assert_one(session, "DELETE s1 FROM {} WHERE a = 3 IF s2 IN (null,20,30)".format(table_name), [True])
-
-        assert_one(session, "SELECT * FROM {} WHERE a = 3".format(table_name), [3, 3, None, None, 3])
-
-        assert_one(session, "DELETE s1 FROM {} WHERE a = 4 IF s2 != 4".format(table_name), [True])
-
-        assert_one(session, "SELECT * FROM {} WHERE a = 4".format(table_name), [4, 4, None, None, 4])
-
-        for operator in [">", "<", ">=", "<=", "="]:
-            assert_one(session, "DELETE s1 FROM {} WHERE a = 5 IF s2 {} 3".format(table_name, operator), [False, None])
-            assert_one(session, "SELECT * FROM {} WHERE a = 5".format(table_name), [5, 5, 5, None, 5])
-
-    def conditional_deletes_on_static_columns_with_null_values_batch_test(self):
-        session = self.get_lwttester_session()
-
-        table_name = "conditional_deletes_on_static_with_null_batch"
-        session.execute("""
-            CREATE TABLE {} (a int, b int, s1 int static, s2 int static, v int, PRIMARY KEY (a, b))
-        """.format(table_name))
-
-        assert_one(session, """
-             BEGIN BATCH
-                 INSERT INTO {table_name} (a, b, s1, v) values (2, 2, 2, 2);
-                 DELETE s1 FROM {table_name} WHERE a = 2 IF s2 = null;
-             APPLY BATCH""".format(table_name=table_name), [True])
-
-        assert_one(session, "SELECT * FROM {} WHERE a = 2".format(table_name), [2, 2, None, None, 2])
-
-        for operator in [">", "<", ">=", "<=", "="]:
-            assert_one(session, """
-                BEGIN BATCH
-                    INSERT INTO {table_name} (a, b, s1, v) values (3, 3, 3, 3);
-                    DELETE s1 FROM {table_name} WHERE a = 3 IF s2 {operator} 5;
-                APPLY BATCH""".format(table_name=table_name, operator=operator), [False])
-
-            assert_none(session, "SELECT * FROM {} WHERE a = 3".format(table_name))
-
-        assert_one(session, """
-             BEGIN BATCH
-                 INSERT INTO {table_name} (a, b, s1, v) values (6, 6, 6, 6);
-                 DELETE s1 FROM {table_name} WHERE a = 6 IF s2 IN (1,2,3);
-             APPLY BATCH""".format(table_name=table_name), [False])
-
-        assert_none(session, "SELECT * FROM {} WHERE a = 6".format(table_name))
-
-        assert_one(session, """
-             BEGIN BATCH
-                 INSERT INTO {table_name} (a, b, s1, v) values (4, 4, 4, 4);
-                 DELETE s1 FROM {table_name} WHERE a = 4 IF s2 = null;
-             APPLY BATCH""".format(table_name=table_name), [True])
-
-        assert_one(session, "SELECT * FROM {} WHERE a = 4".format(table_name), [4, 4, None, None, 4])
-
-        assert_one(session, """
-            BEGIN BATCH
-                INSERT INTO {table_name} (a, b, s1, v) VALUES (5, 5, 5, 5);
-                DELETE s1 FROM {table_name} WHERE a = 5 IF s1 IN (1,2,null);
-            APPLY BATCH""".format(table_name=table_name), [True])
-
-        assert_one(session, "SELECT * FROM {} WHERE a = 5".format(table_name), [5, 5, None, None, 5])
-
-        assert_one(session, """
-            BEGIN BATCH
-                INSERT INTO {table_name} (a, b, s1, v) values (7, 7, 7, 7);
-                DELETE s1 FROM {table_name} WHERE a = 7 IF s2 != 7;
-            APPLY BATCH""".format(table_name=table_name), [True])
-
-        assert_one(session, "SELECT * FROM {} WHERE a = 7".format(table_name), [7, 7, None, None, 7])
-
-    def lwt_with_empty_resultset(self):
-        """
-        LWT with unset row.
-        @jira_ticket CASSANDRA-12694
-        """
-        session = self.get_lwttester_session()
-
-        session.execute("""
-            CREATE TABLE test (pk text, v1 int, v2 text, PRIMARY KEY (pk));
-        """)
-        session.execute("update test set v1 = 100 where pk = 'test1';")
-        node1 = self.cluster.nodelist()[0]
-        self.cluster.flush()
-        assert_one(session, "UPDATE test SET v1 = 100 WHERE pk = 'test1' IF v2 = null;", [True])

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/cql_tracing_test.py
----------------------------------------------------------------------
diff --git a/cql_tracing_test.py b/cql_tracing_test.py
index f25679c..fa6d4b8 100644
--- a/cql_tracing_test.py
+++ b/cql_tracing_test.py
@@ -1,10 +1,14 @@
-# coding: utf-8
+import pytest
+import logging
+
 from distutils.version import LooseVersion
 
-from dtest import Tester, debug, create_ks
-from tools.decorators import since
+from dtest import Tester, create_ks
 from tools.jmxutils import make_mbean, JolokiaAgent, remove_perf_disable_shared_mem
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
 
 class TestCqlTracing(Tester):
     """
@@ -64,42 +68,42 @@ class TestCqlTracing(Tester):
         """)
 
         out, err, _ = node1.run_cqlsh('TRACING ON')
-        self.assertIn('Tracing is enabled', out)
+        assert 'Tracing is enabled' in out
 
         out, err, _ = node1.run_cqlsh('TRACING ON; SELECT * from system.peers')
-        self.assertIn('Tracing session: ', out)
-        self.assertIn('Request complete ', out)
+        assert 'Tracing session: ' in out
+        assert 'Request complete ' in out
 
         # Inserts
         out, err, _ = node1.run_cqlsh(
             "CONSISTENCY ALL; TRACING ON; "
             "INSERT INTO ks.users (userid, firstname, lastname, age) "
             "VALUES (550e8400-e29b-41d4-a716-446655440000, 'Frodo', 'Baggins', 32)")
-        debug(out)
-        self.assertIn('Tracing session: ', out)
+        logger.debug(out)
+        assert 'Tracing session: ' in out
 
-        self.assertIn(node1.address_for_current_version_slashy(), out)
-        self.assertIn(self.cluster.nodelist()[1].address_for_current_version_slashy(), out)
-        self.assertIn(self.cluster.nodelist()[2].address_for_current_version_slashy(), out)
+        assert node1.address_for_current_version_slashy() in out
+        assert self.cluster.nodelist()[1].address_for_current_version_slashy() in out
+        assert self.cluster.nodelist()[2].address_for_current_version_slashy() in out
 
-        self.assertIn('Parsing INSERT INTO ks.users ', out)
-        self.assertIn('Request complete ', out)
+        assert 'Parsing INSERT INTO ks.users ' in out
+        assert 'Request complete ' in out
 
         # Queries
         out, err, _ = node1.run_cqlsh('CONSISTENCY ALL; TRACING ON; '
                                       'SELECT firstname, lastname '
                                       'FROM ks.users WHERE userid = 550e8400-e29b-41d4-a716-446655440000')
-        debug(out)
-        self.assertIn('Tracing session: ', out)
+        logger.debug(out)
+        assert 'Tracing session: ' in out
 
-        self.assertIn(' 127.0.0.1 ', out)
-        self.assertIn(' 127.0.0.2 ', out)
-        self.assertIn(' 127.0.0.3 ', out)
-        self.assertIn('Request complete ', out)
-        self.assertIn(" Frodo |  Baggins", out)
+        assert ' 127.0.0.1 ' in out
+        assert ' 127.0.0.2 ' in out
+        assert ' 127.0.0.3 ' in out
+        assert 'Request complete ' in out
+        assert " Frodo |  Baggins" in out
 
     @since('2.2')
-    def tracing_simple_test(self):
+    def test_tracing_simple(self):
         """
         Test tracing using the default tracing class. See trace().
 
@@ -111,7 +115,7 @@ class TestCqlTracing(Tester):
         self.trace(session)
 
     @since('3.4')
-    def tracing_unknown_impl_test(self):
+    def test_tracing_unknown_impl(self):
         """
         Test that Cassandra logs an error, but keeps its default tracing
         behavior, when a nonexistent tracing class is specified.
@@ -125,23 +129,23 @@ class TestCqlTracing(Tester):
         @jira_ticket CASSANDRA-10392
         """
         expected_error = 'Cannot use class junk for tracing'
-        self.ignore_log_patterns = [expected_error]
+        self.fixture_dtest_setup.ignore_log_patterns = [expected_error]
         session = self.prepare(jvm_args=['-Dcassandra.custom_tracing_class=junk'])
         self.trace(session)
 
         errs = self.cluster.nodelist()[0].grep_log_for_errors()
-        debug('Errors after attempted trace with unknown tracing class: {errs}'.format(errs=errs))
-        self.assertEqual(len(errs), 1)
+        logger.debug('Errors after attempted trace with unknown tracing class: {errs}'.format(errs=errs))
+        assert len(errs) == 1
         if self.cluster.version() >= LooseVersion('3.10'):
             # See CASSANDRA-11706 and PR #1281
-            self.assertTrue(len(errs[0]) > 0)
+            assert len(errs[0]) > 0
         else:
-            self.assertEqual(len(errs[0]), 1)
+            assert len(errs[0]) == 1
         err = errs[0][0]
-        self.assertIn(expected_error, err)
+        assert expected_error in err
 
     @since('3.4')
-    def tracing_default_impl_test(self):
+    def test_tracing_default_impl(self):
         """
         Test that Cassandra logs an error, but keeps its default tracing
         behavior, when the default tracing class is specified.
@@ -158,20 +162,20 @@ class TestCqlTracing(Tester):
         @jira_ticket CASSANDRA-10392
         """
         expected_error = 'Cannot use class org.apache.cassandra.tracing.TracingImpl'
-        self.ignore_log_patterns = [expected_error]
+        self.fixture_dtest_setup.ignore_log_patterns = [expected_error]
         session = self.prepare(jvm_args=['-Dcassandra.custom_tracing_class=org.apache.cassandra.tracing.TracingImpl'])
         self.trace(session)
 
         errs = self.cluster.nodelist()[0].grep_log_for_errors()
-        debug('Errors after attempted trace with default tracing class: {errs}'.format(errs=errs))
-        self.assertEqual(len(errs), 1)
+        logger.debug('Errors after attempted trace with default tracing class: {errs}'.format(errs=errs))
+        assert len(errs) == 1
         if self.cluster.version() >= LooseVersion('3.10'):
             # See CASSANDRA-11706 and PR #1281
-            self.assertTrue(len(errs[0]) > 0)
+            assert len(errs[0]) > 0
         else:
-            self.assertEqual(len(errs[0]), 1)
+            assert len(errs[0]) == 1
         err = errs[0][0]
-        self.assertIn(expected_error, err)
+        assert expected_error in err
         # make sure it logged the error for the correct reason. this isn't
         # part of the expected error to avoid having to escape parens and
         # periods for regexes.
@@ -181,9 +185,8 @@ class TestCqlTracing(Tester):
             check_for_errs_in = errs[0][1]
         else:
             check_for_errs_in = err
-        self.assertIn("Default constructor for Tracing class "
-                      "'org.apache.cassandra.tracing.TracingImpl' is inaccessible.",
-                      check_for_errs_in)
+        assert "Default constructor for Tracing class 'org.apache.cassandra.tracing.TracingImpl' is inaccessible." \
+               in check_for_errs_in
 
     @since('3.0')
     def test_tracing_does_not_interfere_with_digest_calculation(self):
@@ -208,6 +211,6 @@ class TestCqlTracing(Tester):
             # If we are able to read the MBean attribute, assert that the count is 0
             if jmx.has_mbean(rr_count):
                 # expect 0 digest mismatches
-                self.assertEqual(0, jmx.read_attribute(rr_count, 'Count'))
+                assert 0 == jmx.read_attribute(rr_count, 'Count')
             else:
                 pass




http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/thrift_bindings/v22/Cassandra.py
----------------------------------------------------------------------
diff --git a/thrift_bindings/v22/Cassandra.py b/thrift_bindings/v22/Cassandra.py
deleted file mode 100644
index 1e7dd0e..0000000
--- a/thrift_bindings/v22/Cassandra.py
+++ /dev/null
@@ -1,10506 +0,0 @@
-#
-# Autogenerated by Thrift Compiler (0.9.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-#  options string: py
-#
-
-from thrift.protocol import TBinaryProtocol, TProtocol
-from thrift.Thrift import (TApplicationException, TException, TMessageType,
-                           TProcessor, TType)
-from thrift.transport import TTransport
-
-from ttypes import *
-
-try:
-  from thrift.protocol import fastbinary
-except:
-  fastbinary = None
-
-
-class Iface:
-  def login(self, auth_request):
-    """
-    Parameters:
-     - auth_request
-    """
-    pass
-
-  def set_keyspace(self, keyspace):
-    """
-    Parameters:
-     - keyspace
-    """
-    pass
-
-  def get(self, key, column_path, consistency_level):
-    """
-    Get the Column or SuperColumn at the given column_path. If no value is present, NotFoundException is thrown. (This is
-    the only method that can throw an exception under non-failure conditions.)
-
-    Parameters:
-     - key
-     - column_path
-     - consistency_level
-    """
-    pass
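# A hypothetical usage sketch (assumes an open Cassandra.Client as `client`,
# a binary row key `key`, and ColumnPath from the generated ttypes module):
path = ColumnPath(column_family='test', column='v')
result = client.get(key, path, ConsistencyLevel.ONE)  # NotFoundException if absent
print(result.column.value)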
-
-  def get_slice(self, key, column_parent, predicate, consistency_level):
-    """
-    Get the group of columns contained by column_parent (either a ColumnFamily name or a ColumnFamily/SuperColumn name
-    pair) specified by the given SlicePredicate. If no matching values are found, an empty list is returned.
-
-    Parameters:
-     - key
-     - column_parent
-     - predicate
-     - consistency_level
-    """
-    pass
-
-  def get_count(self, key, column_parent, predicate, consistency_level):
-    """
-    returns the number of columns matching <code>predicate</code> for a particular <code>key</code>,
-    <code>ColumnFamily</code> and optionally <code>SuperColumn</code>.
-
-    Parameters:
-     - key
-     - column_parent
-     - predicate
-     - consistency_level
-    """
-    pass
-
-  def multiget_slice(self, keys, column_parent, predicate, consistency_level):
-    """
-    Performs a get_slice for column_parent and predicate for the given keys in parallel.
-
-    Parameters:
-     - keys
-     - column_parent
-     - predicate
-     - consistency_level
-    """
-    pass
-
-  def multiget_count(self, keys, column_parent, predicate, consistency_level):
-    """
-    Perform a get_count in parallel on the given list<binary> keys. The return value maps keys to the count found.
-
-    Parameters:
-     - keys
-     - column_parent
-     - predicate
-     - consistency_level
-    """
-    pass
-
-  def get_range_slices(self, column_parent, predicate, range, consistency_level):
-    """
-    returns a subset of columns for a contiguous range of keys.
-
-    Parameters:
-     - column_parent
-     - predicate
-     - range
-     - consistency_level
-    """
-    pass
-
-  def get_paged_slice(self, column_family, range, start_column, consistency_level):
-    """
-    returns a range of columns, wrapping to the next rows if necessary to collect max_results.
-
-    Parameters:
-     - column_family
-     - range
-     - start_column
-     - consistency_level
-    """
-    pass
-
-  def get_indexed_slices(self, column_parent, index_clause, column_predicate, consistency_level):
-    """
-    Returns the subset of columns specified in SlicePredicate for the rows matching the IndexClause
-    @deprecated use get_range_slices instead with range.row_filter specified
-
-    Parameters:
-     - column_parent
-     - index_clause
-     - column_predicate
-     - consistency_level
-    """
-    pass
-
-  def insert(self, key, column_parent, column, consistency_level):
-    """
-    Insert a Column at the given column_parent.column_family and optional column_parent.super_column.
-
-    Parameters:
-     - key
-     - column_parent
-     - column
-     - consistency_level
-    """
-    pass
-
-  def add(self, key, column_parent, column, consistency_level):
-    """
-    Increment or decrement a counter.
-
-    Parameters:
-     - key
-     - column_parent
-     - column
-     - consistency_level
-    """
-    pass
-
-  def cas(self, key, column_family, expected, updates, serial_consistency_level, commit_consistency_level):
-    """
-    Atomic compare and set.
-
-    If the cas is successful, the success boolean in CASResult will be true and there will be no current_values.
-    Otherwise, success will be false and current_values will contain the current values for the columns in
-    expected (that, by definition of compare-and-set, will differ from the values in expected).
-
-    A cas operation takes 2 consistency levels. The first one, serial_consistency_level, simply indicates the
-    level of serialization required. This can be either ConsistencyLevel.SERIAL or ConsistencyLevel.LOCAL_SERIAL.
-    The second one, commit_consistency_level, defines the consistency level for the commit phase of the cas. This
-    is a more traditional consistency level (the same CLs accepted for traditional writes) that impacts
-    the operation's visibility to reads. For instance, if commit_consistency_level is QUORUM, then it is
-    guaranteed that a followup QUORUM read will see the cas write (if that one was successful obviously). If
-    commit_consistency_level is ANY, you will need to use a SERIAL/LOCAL_SERIAL read to be guaranteed to see
-    the write.
-
-    Parameters:
-     - key
-     - column_family
-     - expected
-     - updates
-     - serial_consistency_level
-     - commit_consistency_level
-    """
-    pass
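# A hypothetical cas() call (assumes an open client and a row key; the empty
# `expected` list is assumed here to mean "apply only if the columns are not
# already set"):
result = client.cas(key, 'test',
                    expected=[],
                    updates=[Column(name='v', value='x', timestamp=100)],
                    serial_consistency_level=ConsistencyLevel.SERIAL,
                    commit_consistency_level=ConsistencyLevel.QUORUM)
if not result.success:
    print(result.current_values)  # the values that made the condition fail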
-
-  def remove(self, key, column_path, timestamp, consistency_level):
-    """
-    Remove data from the row specified by key at the granularity specified by column_path, and the given timestamp. Note
-    that all the values in column_path besides column_path.column_family are truly optional: you can remove the entire
-    row by just specifying the ColumnFamily, or you can remove a SuperColumn or a single Column by specifying those levels too.
-
-    Parameters:
-     - key
-     - column_path
-     - timestamp
-     - consistency_level
-    """
-    pass
-
-  def remove_counter(self, key, path, consistency_level):
-    """
-    Remove a counter at the specified location.
-    Note that counters have limited support for deletes: if you remove a counter, you must wait to issue any following update
-    until the delete has reached all the nodes and all of them have been fully compacted.
-
-    Parameters:
-     - key
-     - path
-     - consistency_level
-    """
-    pass
-
-  def batch_mutate(self, mutation_map, consistency_level):
-    """
-      Mutate many columns or super columns for many row keys. See also: Mutation.
-
-      mutation_map maps key to column family to a list of Mutation objects to take place at that scope.
-
-    Parameters:
-     - mutation_map
-     - consistency_level
-    """
-    pass
-
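-  # Illustrative sketch of the mutation_map shape (key -> column family ->
-  # list of Mutation), assuming column family 'cf1':
-  #
-  #   col = Column(name='email', value='jane@example.com', timestamp=1)
-  #   mutation = Mutation(column_or_supercolumn=ColumnOrSuperColumn(column=col))
-  #   client.batch_mutate({'key1': {'cf1': [mutation]}},
-  #                       ConsistencyLevel.QUORUM)
-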
-  def atomic_batch_mutate(self, mutation_map, consistency_level):
-    """
-      Atomically mutate many columns or super columns for many row keys. See also: Mutation.
-
-      mutation_map maps key to column family to a list of Mutation objects to take place at that scope.
-
-    Parameters:
-     - mutation_map
-     - consistency_level
-    """
-    pass
-
-  def truncate(self, cfname):
-    """
-    Truncate will mark an entire column family as deleted.
-    From the user's perspective a successful call to truncate will result in complete data deletion from cfname.
-    Internally, however, disk space will not be immediately released; as with all deletes in Cassandra, this one
-    only marks the data as deleted.
-    The operation succeeds only if all hosts in the cluster are available, and will throw an UnavailableException if
-    some hosts are down.
-
-    Parameters:
-     - cfname
-    """
-    pass
-
-  def get_multi_slice(self, request):
-    """
-    Select multiple slices of a key in a single RPC operation
-
-    Parameters:
-     - request
-    """
-    pass
-
-  def describe_schema_versions(self, ):
-    """
-    for each schema version present in the cluster, returns a list of nodes at that version.
-    hosts that do not respond will be under the key DatabaseDescriptor.INITIAL_VERSION.
-    the cluster is all on the same version if the size of the map is 1.
-    """
-    pass
-
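-  # Illustrative sketch: because the result maps schema version -> hosts, a
-  # single-entry map means every responding node agrees on the schema.
-  #
-  #   versions = client.describe_schema_versions()
-  #   in_agreement = len(versions) == 1
-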
-  def describe_keyspaces(self, ):
-    """
-    list the defined keyspaces in this cluster
-    """
-    pass
-
-  def describe_cluster_name(self, ):
-    """
-    get the cluster name
-    """
-    pass
-
-  def describe_version(self, ):
-    """
-    get the thrift api version
-    """
-    pass
-
-  def describe_ring(self, keyspace):
-    """
-    get the token ring: a map of ranges to host addresses,
-    represented as a set of TokenRange instead of a map from range
-    to list of endpoints, because you can't use Thrift structs as
-    map keys:
-    https://issues.apache.org/jira/browse/THRIFT-162
-
-    for the same reason, we can't return a set here, even though
-    order is neither important nor predictable.
-
-    Parameters:
-     - keyspace
-    """
-    pass
-
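-  # Illustrative sketch: each TokenRange in the returned list carries the
-  # range bounds plus the endpoints that replicate that range.
-  #
-  #   for token_range in client.describe_ring('ks1'):
-  #       print(token_range.start_token, token_range.end_token,
-  #             token_range.endpoints)
-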
-  def describe_local_ring(self, keyspace):
-    """
-    same as describe_ring, but considers only nodes in the local DC
-
-    Parameters:
-     - keyspace
-    """
-    pass
-
-  def describe_token_map(self, ):
-    """
-    get the mapping between token->node ip
-    without taking replication into consideration
-    https://issues.apache.org/jira/browse/CASSANDRA-4092
-    """
-    pass
-
-  def describe_partitioner(self, ):
-    """
-    returns the partitioner used by this cluster
-    """
-    pass
-
-  def describe_snitch(self, ):
-    """
-    returns the snitch used by this cluster
-    """
-    pass
-
-  def describe_keyspace(self, keyspace):
-    """
-    describe specified keyspace
-
-    Parameters:
-     - keyspace
-    """
-    pass
-
-  def describe_splits(self, cfName, start_token, end_token, keys_per_split):
-    """
-    experimental API for hadoop/parallel query support.
-    may change violently and without warning.
-
-    returns list of token strings such that first subrange is (list[0], list[1]],
-    next is (list[1], list[2]], etc.
-
-    Parameters:
-     - cfName
-     - start_token
-     - end_token
-     - keys_per_split
-    """
-    pass
-
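-  # Illustrative sketch: turning the returned token list into the subranges
-  # (tokens[i], tokens[i + 1]] it describes, assuming token strings
-  # start_token and end_token are already in hand.
-  #
-  #   tokens = client.describe_splits('cf1', start_token, end_token, 1000)
-  #   subranges = list(zip(tokens, tokens[1:]))
-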
-  def trace_next_query(self, ):
-    """
-    Enables tracing for the next query in this connection and returns the UUID for that trace session.
-    The next query will be traced independently of trace probability, and the returned UUID can be used to query the trace keyspace.
-    """
-    pass
-
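-  # Illustrative sketch: the returned session id keys into the system_traces
-  # keyspace, so the trace can be fetched back over CQL afterwards.
-  #
-  #   session_id = client.trace_next_query()
-  #   client.execute_cql3_query('SELECT * FROM ks1.cf1', Compression.NONE,
-  #                             ConsistencyLevel.ONE)
-  #   # look up the trace under system_traces.sessions using session_id
-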
-  def describe_splits_ex(self, cfName, start_token, end_token, keys_per_split):
-    """
-    Parameters:
-     - cfName
-     - start_token
-     - end_token
-     - keys_per_split
-    """
-    pass
-
-  def system_add_column_family(self, cf_def):
-    """
-    adds a column family. returns the new schema id.
-
-    Parameters:
-     - cf_def
-    """
-    pass
-
-  def system_drop_column_family(self, column_family):
-    """
-    drops a column family. returns the new schema id.
-
-    Parameters:
-     - column_family
-    """
-    pass
-
-  def system_add_keyspace(self, ks_def):
-    """
-    adds a keyspace and any column families that are part of it. returns the new schema id.
-
-    Parameters:
-     - ks_def
-    """
-    pass
-
-  def system_drop_keyspace(self, keyspace):
-    """
-    drops a keyspace and any column families that are part of it. returns the new schema id.
-
-    Parameters:
-     - keyspace
-    """
-    pass
-
-  def system_update_keyspace(self, ks_def):
-    """
-    updates properties of a keyspace. returns the new schema id.
-
-    Parameters:
-     - ks_def
-    """
-    pass
-
-  def system_update_column_family(self, cf_def):
-    """
-    updates properties of a column family. returns the new schema id.
-
-    Parameters:
-     - cf_def
-    """
-    pass
-
-  def execute_cql_query(self, query, compression):
-    """
-    @deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead.
-
-    Parameters:
-     - query
-     - compression
-    """
-    pass
-
-  def execute_cql3_query(self, query, compression, consistency):
-    """
-    Executes a CQL3 (Cassandra Query Language) statement and returns a
-    CqlResult containing the results.
-
-    Parameters:
-     - query
-     - compression
-     - consistency
-    """
-    pass
-
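-  # Illustrative sketch, assuming a connected `client`: Compression.NONE
-  # sends the statement uncompressed, and the CqlResult rows carry CqlRow
-  # objects.
-  #
-  #   result = client.execute_cql3_query('SELECT * FROM ks1.cf1',
-  #                                      Compression.NONE,
-  #                                      ConsistencyLevel.ONE)
-  #   for row in result.rows:
-  #       print(row.key, row.columns)
-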
-  def prepare_cql_query(self, query, compression):
-    """
-    @deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead.
-
-    Parameters:
-     - query
-     - compression
-    """
-    pass
-
-  def prepare_cql3_query(self, query, compression):
-    """
-    Prepare a CQL3 (Cassandra Query Language) statement by compiling and returning
-    - the type of CQL statement
-    - an id token of the compiled CQL stored on the server side.
-    - a count of the discovered bound markers in the statement
-
-    Parameters:
-     - query
-     - compression
-    """
-    pass
-
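-  # Illustrative sketch: the prepared statement id returned here is what
-  # execute_prepared_cql3_query expects, together with one bound value per
-  # discovered marker.
-  #
-  #   prepared = client.prepare_cql3_query('SELECT * FROM ks1.cf1 WHERE key = ?',
-  #                                        Compression.NONE)
-  #   client.execute_prepared_cql3_query(prepared.itemId, ['key1'],
-  #                                      ConsistencyLevel.ONE)
-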
-  def execute_prepared_cql_query(self, itemId, values):
-    """
-    @deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead.
-
-    Parameters:
-     - itemId
-     - values
-    """
-    pass
-
-  def execute_prepared_cql3_query(self, itemId, values, consistency):
-    """
-    Executes a prepared CQL3 (Cassandra Query Language) statement by passing an id token, a list of variables
-    to bind, and the consistency level, and returns a CqlResult containing the results.
-
-    Parameters:
-     - itemId
-     - values
-     - consistency
-    """
-    pass
-
-  def set_cql_version(self, version):
-    """
-    @deprecated This is now a no-op. Please use the CQL3 specific methods instead.
-
-    Parameters:
-     - version
-    """
-    pass
-
-
-class Client(Iface):
-  def __init__(self, iprot, oprot=None):
-    self._iprot = self._oprot = iprot
-    if oprot is not None:
-      self._oprot = oprot
-    self._seqid = 0
-
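-  # Illustrative sketch of wiring a Client to a node, assuming the Thrift
-  # Python package is installed and a Cassandra RPC port is listening on
-  # 9160; Cassandra's Thrift interface requires a framed transport.
-  #
-  #   socket = TSocket.TSocket('127.0.0.1', 9160)
-  #   transport = TTransport.TFramedTransport(socket)
-  #   client = Client(TBinaryProtocol.TBinaryProtocolAccelerated(transport))
-  #   transport.open()
-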
-  def login(self, auth_request):
-    """
-    Parameters:
-     - auth_request
-    """
-    self.send_login(auth_request)
-    self.recv_login()
-
-  def send_login(self, auth_request):
-    self._oprot.writeMessageBegin('login', TMessageType.CALL, self._seqid)
-    args = login_args()
-    args.auth_request = auth_request
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_login(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = login_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.authnx is not None:
-      raise result.authnx
-    if result.authzx is not None:
-      raise result.authzx
-    return
-
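-  # Every call below follows the same synchronous pattern as login: send_*
-  # serializes a <name>_args struct and flushes the transport, while recv_*
-  # reads the matching <name>_result struct and re-raises any exception
-  # fields (ire, ue, te, ...) that came back populated.
-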
-  def set_keyspace(self, keyspace):
-    """
-    Parameters:
-     - keyspace
-    """
-    self.send_set_keyspace(keyspace)
-    self.recv_set_keyspace()
-
-  def send_set_keyspace(self, keyspace):
-    self._oprot.writeMessageBegin('set_keyspace', TMessageType.CALL, self._seqid)
-    args = set_keyspace_args()
-    args.keyspace = keyspace
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_set_keyspace(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = set_keyspace_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.ire is not None:
-      raise result.ire
-    return
-
-  def get(self, key, column_path, consistency_level):
-    """
-    Get the Column or SuperColumn at the given column_path. If no value is present, NotFoundException is thrown. (This is
-    the only method that can throw an exception under non-failure conditions.)
-
-    Parameters:
-     - key
-     - column_path
-     - consistency_level
-    """
-    self.send_get(key, column_path, consistency_level)
-    return self.recv_get()
-
-  def send_get(self, key, column_path, consistency_level):
-    self._oprot.writeMessageBegin('get', TMessageType.CALL, self._seqid)
-    args = get_args()
-    args.key = key
-    args.column_path = column_path
-    args.consistency_level = consistency_level
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_get(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = get_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    if result.nfe is not None:
-      raise result.nfe
-    if result.ue is not None:
-      raise result.ue
-    if result.te is not None:
-      raise result.te
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "get failed: unknown result")
-
-  def get_slice(self, key, column_parent, predicate, consistency_level):
-    """
-    Get the group of columns contained by column_parent (either a ColumnFamily name or a ColumnFamily/SuperColumn name
-    pair) specified by the given SlicePredicate. If no matching values are found, an empty list is returned.
-
-    Parameters:
-     - key
-     - column_parent
-     - predicate
-     - consistency_level
-    """
-    self.send_get_slice(key, column_parent, predicate, consistency_level)
-    return self.recv_get_slice()
-
-  def send_get_slice(self, key, column_parent, predicate, consistency_level):
-    self._oprot.writeMessageBegin('get_slice', TMessageType.CALL, self._seqid)
-    args = get_slice_args()
-    args.key = key
-    args.column_parent = column_parent
-    args.predicate = predicate
-    args.consistency_level = consistency_level
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_get_slice(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = get_slice_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    if result.ue is not None:
-      raise result.ue
-    if result.te is not None:
-      raise result.te
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_slice failed: unknown result")
-
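-  # Illustrative sketch: a SlicePredicate either names columns explicitly or
-  # wraps a SliceRange; the range below asks for up to 100 columns in
-  # ascending order from column family 'cf1'.
-  #
-  #   predicate = SlicePredicate(slice_range=SliceRange(
-  #       start='', finish='', reversed=False, count=100))
-  #   client.get_slice('key1', ColumnParent(column_family='cf1'), predicate,
-  #                    ConsistencyLevel.ONE)
-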
-  def get_count(self, key, column_parent, predicate, consistency_level):
-    """
-    returns the number of columns matching predicate for a particular key,
-    ColumnFamily and optionally SuperColumn.
-
-    Parameters:
-     - key
-     - column_parent
-     - predicate
-     - consistency_level
-    """
-    self.send_get_count(key, column_parent, predicate, consistency_level)
-    return self.recv_get_count()
-
-  def send_get_count(self, key, column_parent, predicate, consistency_level):
-    self._oprot.writeMessageBegin('get_count', TMessageType.CALL, self._seqid)
-    args = get_count_args()
-    args.key = key
-    args.column_parent = column_parent
-    args.predicate = predicate
-    args.consistency_level = consistency_level
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_get_count(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = get_count_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    if result.ue is not None:
-      raise result.ue
-    if result.te is not None:
-      raise result.te
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_count failed: unknown result")
-
-  def multiget_slice(self, keys, column_parent, predicate, consistency_level):
-    """
-    Performs a get_slice for column_parent and predicate for the given keys in parallel.
-
-    Parameters:
-     - keys
-     - column_parent
-     - predicate
-     - consistency_level
-    """
-    self.send_multiget_slice(keys, column_parent, predicate, consistency_level)
-    return self.recv_multiget_slice()
-
-  def send_multiget_slice(self, keys, column_parent, predicate, consistency_level):
-    self._oprot.writeMessageBegin('multiget_slice', TMessageType.CALL, self._seqid)
-    args = multiget_slice_args()
-    args.keys = keys
-    args.column_parent = column_parent
-    args.predicate = predicate
-    args.consistency_level = consistency_level
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_multiget_slice(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = multiget_slice_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    if result.ue is not None:
-      raise result.ue
-    if result.te is not None:
-      raise result.te
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "multiget_slice failed: unknown result")
-
-  def multiget_count(self, keys, column_parent, predicate, consistency_level):
-    """
-    Perform a get_count in parallel on the given list<binary> keys. The return value maps keys to the count found.
-
-    Parameters:
-     - keys
-     - column_parent
-     - predicate
-     - consistency_level
-    """
-    self.send_multiget_count(keys, column_parent, predicate, consistency_level)
-    return self.recv_multiget_count()
-
-  def send_multiget_count(self, keys, column_parent, predicate, consistency_level):
-    self._oprot.writeMessageBegin('multiget_count', TMessageType.CALL, self._seqid)
-    args = multiget_count_args()
-    args.keys = keys
-    args.column_parent = column_parent
-    args.predicate = predicate
-    args.consistency_level = consistency_level
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_multiget_count(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = multiget_count_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    if result.ue is not None:
-      raise result.ue
-    if result.te is not None:
-      raise result.te
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "multiget_count failed: unknown result")
-
-  def get_range_slices(self, column_parent, predicate, range, consistency_level):
-    """
-    returns a subset of columns for a contiguous range of keys.
-
-    Parameters:
-     - column_parent
-     - predicate
-     - range
-     - consistency_level
-    """
-    self.send_get_range_slices(column_parent, predicate, range, consistency_level)
-    return self.recv_get_range_slices()
-
-  def send_get_range_slices(self, column_parent, predicate, range, consistency_level):
-    self._oprot.writeMessageBegin('get_range_slices', TMessageType.CALL, self._seqid)
-    args = get_range_slices_args()
-    args.column_parent = column_parent
-    args.predicate = predicate
-    args.range = range
-    args.consistency_level = consistency_level
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_get_range_slices(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = get_range_slices_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    if result.ue is not None:
-      raise result.ue
-    if result.te is not None:
-      raise result.te
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_range_slices failed: unknown result")
-
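-  # Illustrative sketch: a KeyRange bounds the scan either by keys or by
-  # tokens; this one pages through at most 100 rows from the start of the
-  # ring, reusing a SlicePredicate like the one sketched for get_slice.
-  #
-  #   key_range = KeyRange(start_key='', end_key='', count=100)
-  #   slices = client.get_range_slices(ColumnParent(column_family='cf1'),
-  #                                    predicate, key_range,
-  #                                    ConsistencyLevel.ONE)
-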
-  def get_paged_slice(self, column_family, range, start_column, consistency_level):
-    """
-    returns a range of columns, wrapping to the next rows if necessary to collect max_results.
-
-    Parameters:
-     - column_family
-     - range
-     - start_column
-     - consistency_level
-    """
-    self.send_get_paged_slice(column_family, range, start_column, consistency_level)
-    return self.recv_get_paged_slice()
-
-  def send_get_paged_slice(self, column_family, range, start_column, consistency_level):
-    self._oprot.writeMessageBegin('get_paged_slice', TMessageType.CALL, self._seqid)
-    args = get_paged_slice_args()
-    args.column_family = column_family
-    args.range = range
-    args.start_column = start_column
-    args.consistency_level = consistency_level
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_get_paged_slice(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = get_paged_slice_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    if result.ue is not None:
-      raise result.ue
-    if result.te is not None:
-      raise result.te
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_paged_slice failed: unknown result")
-
-  def get_indexed_slices(self, column_parent, index_clause, column_predicate, consistency_level):
-    """
-    Returns the subset of columns specified in SlicePredicate for the rows matching the IndexClause
-    @deprecated use get_range_slices instead with range.row_filter specified
-
-    Parameters:
-     - column_parent
-     - index_clause
-     - column_predicate
-     - consistency_level
-    """
-    self.send_get_indexed_slices(column_parent, index_clause, column_predicate, consistency_level)
-    return self.recv_get_indexed_slices()
-
-  def send_get_indexed_slices(self, column_parent, index_clause, column_predicate, consistency_level):
-    self._oprot.writeMessageBegin('get_indexed_slices', TMessageType.CALL, self._seqid)
-    args = get_indexed_slices_args()
-    args.column_parent = column_parent
-    args.index_clause = index_clause
-    args.column_predicate = column_predicate
-    args.consistency_level = consistency_level
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_get_indexed_slices(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = get_indexed_slices_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    if result.ue is not None:
-      raise result.ue
-    if result.te is not None:
-      raise result.te
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_indexed_slices failed: unknown result")
-
-  def insert(self, key, column_parent, column, consistency_level):
-    """
-    Insert a Column at the given column_parent.column_family and optional column_parent.super_column.
-
-    Parameters:
-     - key
-     - column_parent
-     - column
-     - consistency_level
-    """
-    self.send_insert(key, column_parent, column, consistency_level)
-    self.recv_insert()
-
-  def send_insert(self, key, column_parent, column, consistency_level):
-    self._oprot.writeMessageBegin('insert', TMessageType.CALL, self._seqid)
-    args = insert_args()
-    args.key = key
-    args.column_parent = column_parent
-    args.column = column
-    args.consistency_level = consistency_level
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_insert(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = insert_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.ire is not None:
-      raise result.ire
-    if result.ue is not None:
-      raise result.ue
-    if result.te is not None:
-      raise result.te
-    return
-
-  def add(self, key, column_parent, column, consistency_level):
-    """
-    Increment or decrement a counter.
-
-    Parameters:
-     - key
-     - column_parent
-     - column
-     - consistency_level
-    """
-    self.send_add(key, column_parent, column, consistency_level)
-    self.recv_add()
-
-  def send_add(self, key, column_parent, column, consistency_level):
-    self._oprot.writeMessageBegin('add', TMessageType.CALL, self._seqid)
-    args = add_args()
-    args.key = key
-    args.column_parent = column_parent
-    args.column = column
-    args.consistency_level = consistency_level
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_add(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = add_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.ire is not None:
-      raise result.ire
-    if result.ue is not None:
-      raise result.ue
-    if result.te is not None:
-      raise result.te
-    return
-
-  def cas(self, key, column_family, expected, updates, serial_consistency_level, commit_consistency_level):
-    """
-    Atomic compare and set.
-
-    If the cas is successful, the success boolean in CASResult will be true and there will be no current_values.
-    Otherwise, success will be false and current_values will contain the current values for the columns in
-    expected (that, by definition of compare-and-set, will differ from the values in expected).
-
-    A cas operation takes two consistency levels. The first one, serial_consistency_level, simply indicates the
-    level of serialization required. This can be either ConsistencyLevel.SERIAL or ConsistencyLevel.LOCAL_SERIAL.
-    The second one, commit_consistency_level, defines the consistency level for the commit phase of the cas. This
-    is a more traditional consistency level (the same CLs as for traditional writes are accepted) that impacts
-    the visibility of the operation for reads. For instance, if commit_consistency_level is QUORUM, then it is
-    guaranteed that a follow-up QUORUM read will see the cas write (if that write was successful, obviously). If
-    commit_consistency_level is ANY, you will need to use a SERIAL/LOCAL_SERIAL read to be guaranteed to see
-    the write.
-
-    Parameters:
-     - key
-     - column_family
-     - expected
-     - updates
-     - serial_consistency_level
-     - commit_consistency_level
-    """
-    self.send_cas(key, column_family, expected, updates, serial_consistency_level, commit_consistency_level)
-    return self.recv_cas()
-
-  def send_cas(self, key, column_family, expected, updates, serial_consistency_level, commit_consistency_level):
-    self._oprot.writeMessageBegin('cas', TMessageType.CALL, self._seqid)
-    args = cas_args()
-    args.key = key
-    args.column_family = column_family
-    args.expected = expected
-    args.updates = updates
-    args.serial_consistency_level = serial_consistency_level
-    args.commit_consistency_level = commit_consistency_level
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_cas(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = cas_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    if result.ue is not None:
-      raise result.ue
-    if result.te is not None:
-      raise result.te
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "cas failed: unknown result")
-
-  def remove(self, key, column_path, timestamp, consistency_level):
-    """
-    Remove data from the row specified by key at the granularity specified by column_path, and the given timestamp. Note
-    that all the values in column_path besides column_path.column_family are truly optional: you can remove the entire
-    row by just specifying the ColumnFamily, or you can remove a SuperColumn or a single Column by specifying those levels too.
-
-    Parameters:
-     - key
-     - column_path
-     - timestamp
-     - consistency_level
-    """
-    self.send_remove(key, column_path, timestamp, consistency_level)
-    self.recv_remove()
-
-  def send_remove(self, key, column_path, timestamp, consistency_level):
-    self._oprot.writeMessageBegin('remove', TMessageType.CALL, self._seqid)
-    args = remove_args()
-    args.key = key
-    args.column_path = column_path
-    args.timestamp = timestamp
-    args.consistency_level = consistency_level
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_remove(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = remove_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.ire is not None:
-      raise result.ire
-    if result.ue is not None:
-      raise result.ue
-    if result.te is not None:
-      raise result.te
-    return
-
-  def remove_counter(self, key, path, consistency_level):
-    """
-    Remove a counter at the specified location.
-    Note that counters have limited support for deletes: if you remove a counter, you must wait to issue any following update
-    until the delete has reached all the nodes and all of them have been fully compacted.
-
-    Parameters:
-     - key
-     - path
-     - consistency_level
-    """
-    self.send_remove_counter(key, path, consistency_level)
-    self.recv_remove_counter()
-
-  def send_remove_counter(self, key, path, consistency_level):
-    self._oprot.writeMessageBegin('remove_counter', TMessageType.CALL, self._seqid)
-    args = remove_counter_args()
-    args.key = key
-    args.path = path
-    args.consistency_level = consistency_level
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_remove_counter(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = remove_counter_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.ire is not None:
-      raise result.ire
-    if result.ue is not None:
-      raise result.ue
-    if result.te is not None:
-      raise result.te
-    return
-
-  def batch_mutate(self, mutation_map, consistency_level):
-    """
-      Mutate many columns or super columns for many row keys. See also: Mutation.
-
-      mutation_map maps key to column family to a list of Mutation objects to take place at that scope.
-
-    Parameters:
-     - mutation_map
-     - consistency_level
-    """
-    self.send_batch_mutate(mutation_map, consistency_level)
-    self.recv_batch_mutate()
-
-  def send_batch_mutate(self, mutation_map, consistency_level):
-    self._oprot.writeMessageBegin('batch_mutate', TMessageType.CALL, self._seqid)
-    args = batch_mutate_args()
-    args.mutation_map = mutation_map
-    args.consistency_level = consistency_level
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_batch_mutate(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = batch_mutate_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.ire is not None:
-      raise result.ire
-    if result.ue is not None:
-      raise result.ue
-    if result.te is not None:
-      raise result.te
-    return
-
-  def atomic_batch_mutate(self, mutation_map, consistency_level):
-    """
-      Atomically mutate many columns or super columns for many row keys. See also: Mutation.
-
-      mutation_map maps key to column family to a list of Mutation objects to take place at that scope.
-
-    Parameters:
-     - mutation_map
-     - consistency_level
-    """
-    self.send_atomic_batch_mutate(mutation_map, consistency_level)
-    self.recv_atomic_batch_mutate()
-
-  def send_atomic_batch_mutate(self, mutation_map, consistency_level):
-    self._oprot.writeMessageBegin('atomic_batch_mutate', TMessageType.CALL, self._seqid)
-    args = atomic_batch_mutate_args()
-    args.mutation_map = mutation_map
-    args.consistency_level = consistency_level
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_atomic_batch_mutate(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = atomic_batch_mutate_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.ire is not None:
-      raise result.ire
-    if result.ue is not None:
-      raise result.ue
-    if result.te is not None:
-      raise result.te
-    return
-
-  def truncate(self, cfname):
-    """
-    Truncate will mark an entire column family as deleted.
-    From the user's perspective a successful call to truncate will result in complete data deletion from cfname.
-    Internally, however, disk space will not be immediately released; as with all deletes in Cassandra, this one
-    only marks the data as deleted.
-    The operation succeeds only if all hosts in the cluster are available, and will throw an UnavailableException if
-    some hosts are down.
-
-    Parameters:
-     - cfname
-    """
-    self.send_truncate(cfname)
-    self.recv_truncate()
-
-  def send_truncate(self, cfname):
-    self._oprot.writeMessageBegin('truncate', TMessageType.CALL, self._seqid)
-    args = truncate_args()
-    args.cfname = cfname
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_truncate(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = truncate_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.ire is not None:
-      raise result.ire
-    if result.ue is not None:
-      raise result.ue
-    if result.te is not None:
-      raise result.te
-    return
-
-  def get_multi_slice(self, request):
-    """
-    Select multiple slices of a key in a single RPC operation
-
-    Parameters:
-     - request
-    """
-    self.send_get_multi_slice(request)
-    return self.recv_get_multi_slice()
-
-  def send_get_multi_slice(self, request):
-    self._oprot.writeMessageBegin('get_multi_slice', TMessageType.CALL, self._seqid)
-    args = get_multi_slice_args()
-    args.request = request
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_get_multi_slice(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = get_multi_slice_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    if result.ue is not None:
-      raise result.ue
-    if result.te is not None:
-      raise result.te
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_multi_slice failed: unknown result")
-
-  def describe_schema_versions(self, ):
-    """
-    for each schema version present in the cluster, returns a list of nodes at that version.
-    hosts that do not respond will be under the key DatabaseDescriptor.INITIAL_VERSION.
-    the cluster is all on the same version if the size of the map is 1.
-    """
-    self.send_describe_schema_versions()
-    return self.recv_describe_schema_versions()
-
-  def send_describe_schema_versions(self, ):
-    self._oprot.writeMessageBegin('describe_schema_versions', TMessageType.CALL, self._seqid)
-    args = describe_schema_versions_args()
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_describe_schema_versions(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = describe_schema_versions_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_schema_versions failed: unknown result")
-
-  def describe_keyspaces(self, ):
-    """
-    list the defined keyspaces in this cluster
-    """
-    self.send_describe_keyspaces()
-    return self.recv_describe_keyspaces()
-
-  def send_describe_keyspaces(self, ):
-    self._oprot.writeMessageBegin('describe_keyspaces', TMessageType.CALL, self._seqid)
-    args = describe_keyspaces_args()
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_describe_keyspaces(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = describe_keyspaces_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_keyspaces failed: unknown result")
-
-  def describe_cluster_name(self, ):
-    """
-    get the cluster name
-    """
-    self.send_describe_cluster_name()
-    return self.recv_describe_cluster_name()
-
-  def send_describe_cluster_name(self, ):
-    self._oprot.writeMessageBegin('describe_cluster_name', TMessageType.CALL, self._seqid)
-    args = describe_cluster_name_args()
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_describe_cluster_name(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = describe_cluster_name_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_cluster_name failed: unknown result")
-
-  def describe_version(self, ):
-    """
-    get the thrift api version
-    """
-    self.send_describe_version()
-    return self.recv_describe_version()
-
-  def send_describe_version(self, ):
-    self._oprot.writeMessageBegin('describe_version', TMessageType.CALL, self._seqid)
-    args = describe_version_args()
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_describe_version(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = describe_version_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_version failed: unknown result")
-
-  def describe_ring(self, keyspace):
-    """
-    get the token ring: a map of ranges to host addresses,
-    represented as a set of TokenRange instead of a map from range
-    to list of endpoints, because you can't use Thrift structs as
-    map keys:
-    https://issues.apache.org/jira/browse/THRIFT-162
-
-    for the same reason, we can't return a set here, even though
-    order is neither important nor predictable.
-
-    Parameters:
-     - keyspace
-    """
-    self.send_describe_ring(keyspace)
-    return self.recv_describe_ring()
-
-  def send_describe_ring(self, keyspace):
-    self._oprot.writeMessageBegin('describe_ring', TMessageType.CALL, self._seqid)
-    args = describe_ring_args()
-    args.keyspace = keyspace
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_describe_ring(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = describe_ring_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_ring failed: unknown result")
-
-  def describe_local_ring(self, keyspace):
-    """
-    same as describe_ring, but considers only nodes in the local DC
-
-    Parameters:
-     - keyspace
-    """
-    self.send_describe_local_ring(keyspace)
-    return self.recv_describe_local_ring()
-
-  def send_describe_local_ring(self, keyspace):
-    self._oprot.writeMessageBegin('describe_local_ring', TMessageType.CALL, self._seqid)
-    args = describe_local_ring_args()
-    args.keyspace = keyspace
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_describe_local_ring(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = describe_local_ring_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_local_ring failed: unknown result")
-
-  def describe_token_map(self, ):
-    """
-    get the mapping between token->node ip
-    without taking replication into consideration
-    https://issues.apache.org/jira/browse/CASSANDRA-4092
-    """
-    self.send_describe_token_map()
-    return self.recv_describe_token_map()
-
-  def send_describe_token_map(self, ):
-    self._oprot.writeMessageBegin('describe_token_map', TMessageType.CALL, self._seqid)
-    args = describe_token_map_args()
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_describe_token_map(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = describe_token_map_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_token_map failed: unknown result")
-
-  def describe_partitioner(self, ):
-    """
-    returns the partitioner used by this cluster
-    """
-    self.send_describe_partitioner()
-    return self.recv_describe_partitioner()
-
-  def send_describe_partitioner(self, ):
-    self._oprot.writeMessageBegin('describe_partitioner', TMessageType.CALL, self._seqid)
-    args = describe_partitioner_args()
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_describe_partitioner(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = describe_partitioner_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_partitioner failed: unknown result")
-
-  def describe_snitch(self, ):
-    """
-    returns the snitch used by this cluster
-    """
-    self.send_describe_snitch()
-    return self.recv_describe_snitch()
-
-  def send_describe_snitch(self, ):
-    self._oprot.writeMessageBegin('describe_snitch', TMessageType.CALL, self._seqid)
-    args = describe_snitch_args()
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_describe_snitch(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = describe_snitch_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_snitch failed: unknown result")
-
-  def describe_keyspace(self, keyspace):
-    """
-    describe specified keyspace
-
-    Parameters:
-     - keyspace
-    """
-    self.send_describe_keyspace(keyspace)
-    return self.recv_describe_keyspace()
-
-  def send_describe_keyspace(self, keyspace):
-    self._oprot.writeMessageBegin('describe_keyspace', TMessageType.CALL, self._seqid)
-    args = describe_keyspace_args()
-    args.keyspace = keyspace
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_describe_keyspace(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = describe_keyspace_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.nfe is not None:
-      raise result.nfe
-    if result.ire is not None:
-      raise result.ire
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_keyspace failed: unknown result")
-
-  def describe_splits(self, cfName, start_token, end_token, keys_per_split):
-    """
-    experimental API for hadoop/parallel query support.
-    may change violently and without warning.
-
-    returns list of token strings such that first subrange is (list[0], list[1]],
-    next is (list[1], list[2]], etc.
-
-    Parameters:
-     - cfName
-     - start_token
-     - end_token
-     - keys_per_split
-    """
-    self.send_describe_splits(cfName, start_token, end_token, keys_per_split)
-    return self.recv_describe_splits()
-
-  def send_describe_splits(self, cfName, start_token, end_token, keys_per_split):
-    self._oprot.writeMessageBegin('describe_splits', TMessageType.CALL, self._seqid)
-    args = describe_splits_args()
-    args.cfName = cfName
-    args.start_token = start_token
-    args.end_token = end_token
-    args.keys_per_split = keys_per_split
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_describe_splits(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = describe_splits_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_splits failed: unknown result")
-
-  def trace_next_query(self, ):
-    """
-    Enables tracing for the next query in this connection and returns the UUID for that trace session.
-    The next query will be traced independently of trace probability, and the returned UUID can be used to query the trace keyspace.
-    """
-    self.send_trace_next_query()
-    return self.recv_trace_next_query()
-
-  def send_trace_next_query(self, ):
-    self._oprot.writeMessageBegin('trace_next_query', TMessageType.CALL, self._seqid)
-    args = trace_next_query_args()
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_trace_next_query(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = trace_next_query_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "trace_next_query failed: unknown result")
-
-  def describe_splits_ex(self, cfName, start_token, end_token, keys_per_split):
-    """
-    Parameters:
-     - cfName
-     - start_token
-     - end_token
-     - keys_per_split
-    """
-    self.send_describe_splits_ex(cfName, start_token, end_token, keys_per_split)
-    return self.recv_describe_splits_ex()
-
-  def send_describe_splits_ex(self, cfName, start_token, end_token, keys_per_split):
-    self._oprot.writeMessageBegin('describe_splits_ex', TMessageType.CALL, self._seqid)
-    args = describe_splits_ex_args()
-    args.cfName = cfName
-    args.start_token = start_token
-    args.end_token = end_token
-    args.keys_per_split = keys_per_split
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_describe_splits_ex(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = describe_splits_ex_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_splits_ex failed: unknown result")
-
-  def system_add_column_family(self, cf_def):
-    """
-    adds a column family. returns the new schema id.
-
-    Parameters:
-     - cf_def
-    """
-    self.send_system_add_column_family(cf_def)
-    return self.recv_system_add_column_family()
-
-  def send_system_add_column_family(self, cf_def):
-    self._oprot.writeMessageBegin('system_add_column_family', TMessageType.CALL, self._seqid)
-    args = system_add_column_family_args()
-    args.cf_def = cf_def
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_system_add_column_family(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = system_add_column_family_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    if result.sde is not None:
-      raise result.sde
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "system_add_column_family failed: unknown result")
-
-  def system_drop_column_family(self, column_family):
-    """
-    drops a column family. returns the new schema id.
-
-    Parameters:
-     - column_family
-    """
-    self.send_system_drop_column_family(column_family)
-    return self.recv_system_drop_column_family()
-
-  def send_system_drop_column_family(self, column_family):
-    self._oprot.writeMessageBegin('system_drop_column_family', TMessageType.CALL, self._seqid)
-    args = system_drop_column_family_args()
-    args.column_family = column_family
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_system_drop_column_family(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = system_drop_column_family_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    if result.sde is not None:
-      raise result.sde
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "system_drop_column_family failed: unknown result")
-
-  def system_add_keyspace(self, ks_def):
-    """
-    adds a keyspace and any column families that are part of it. returns the new schema id.
-
-    Parameters:
-     - ks_def
-    """
-    self.send_system_add_keyspace(ks_def)
-    return self.recv_system_add_keyspace()
-
-  def send_system_add_keyspace(self, ks_def):
-    self._oprot.writeMessageBegin('system_add_keyspace', TMessageType.CALL, self._seqid)
-    args = system_add_keyspace_args()
-    args.ks_def = ks_def
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_system_add_keyspace(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = system_add_keyspace_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    if result.sde is not None:
-      raise result.sde
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "system_add_keyspace failed: unknown result")
-
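-  # Illustrative sketch: a minimal KsDef for a SimpleStrategy keyspace with
-  # replication factor 1 and no column families yet.
-  #
-  #   ks_def = KsDef(name='ks1',
-  #                  strategy_class='org.apache.cassandra.locator.SimpleStrategy',
-  #                  strategy_options={'replication_factor': '1'},
-  #                  cf_defs=[])
-  #   client.system_add_keyspace(ks_def)
-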
-  def system_drop_keyspace(self, keyspace):
-    """
-    drops a keyspace and any column families that are part of it. returns the new schema id.
-
-    Parameters:
-     - keyspace
-    """
-    self.send_system_drop_keyspace(keyspace)
-    return self.recv_system_drop_keyspace()
-
-  def send_system_drop_keyspace(self, keyspace):
-    self._oprot.writeMessageBegin('system_drop_keyspace', TMessageType.CALL, self._seqid)
-    args = system_drop_keyspace_args()
-    args.keyspace = keyspace
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_system_drop_keyspace(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = system_drop_keyspace_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    if result.sde is not None:
-      raise result.sde
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "system_drop_keyspace failed: unknown result")
-
-  def system_update_keyspace(self, ks_def):
-    """
-    updates properties of a keyspace. returns the new schema id.
-
-    Parameters:
-     - ks_def
-    """
-    self.send_system_update_keyspace(ks_def)
-    return self.recv_system_update_keyspace()
-
-  def send_system_update_keyspace(self, ks_def):
-    self._oprot.writeMessageBegin('system_update_keyspace', TMessageType.CALL, self._seqid)
-    args = system_update_keyspace_args()
-    args.ks_def = ks_def
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_system_update_keyspace(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = system_update_keyspace_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    if result.sde is not None:
-      raise result.sde
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "system_update_keyspace failed: unknown result");
-
-  def system_update_column_family(self, cf_def):
-    """
-    updates properties of a column family. returns the new schema id.
-
-    Parameters:
-     - cf_def
-    """
-    self.send_system_update_column_family(cf_def)
-    return self.recv_system_update_column_family()
-
-  def send_system_update_column_family(self, cf_def):
-    self._oprot.writeMessageBegin('system_update_column_family', TMessageType.CALL, self._seqid)
-    args = system_update_column_family_args()
-    args.cf_def = cf_def
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_system_update_column_family(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = system_update_column_family_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    if result.sde is not None:
-      raise result.sde
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "system_update_column_family failed: unknown result");
-
-  def execute_cql_query(self, query, compression):
-    """
-    @deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead.
-
-    Parameters:
-     - query
-     - compression
-    """
-    self.send_execute_cql_query(query, compression)
-    return self.recv_execute_cql_query()
-
-  def send_execute_cql_query(self, query, compression):
-    self._oprot.writeMessageBegin('execute_cql_query', TMessageType.CALL, self._seqid)
-    args = execute_cql_query_args()
-    args.query = query
-    args.compression = compression
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_execute_cql_query(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = execute_cql_query_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    if result.ue is not None:
-      raise result.ue
-    if result.te is not None:
-      raise result.te
-    if result.sde is not None:
-      raise result.sde
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "execute_cql_query failed: unknown result");
-
-  def execute_cql3_query(self, query, compression, consistency):
-    """
-    Executes a CQL3 (Cassandra Query Language) statement and returns a
-    CqlResult containing the results.
-
-    Parameters:
-     - query
-     - compression
-     - consistency
-    """
-    self.send_execute_cql3_query(query, compression, consistency)
-    return self.recv_execute_cql3_query()
-
-  def send_execute_cql3_query(self, query, compression, consistency):
-    self._oprot.writeMessageBegin('execute_cql3_query', TMessageType.CALL, self._seqid)
-    args = execute_cql3_query_args()
-    args.query = query
-    args.compression = compression
-    args.consistency = consistency
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_execute_cql3_query(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = execute_cql3_query_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    if result.ue is not None:
-      raise result.ue
-    if result.te is not None:
-      raise result.te
-    if result.sde is not None:
-      raise result.sde
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "execute_cql3_query failed: unknown result");
-
-  def prepare_cql_query(self, query, compression):
-    """
-    @deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead.
-
-    Parameters:
-     - query
-     - compression
-    """
-    self.send_prepare_cql_query(query, compression)
-    return self.recv_prepare_cql_query()
-
-  def send_prepare_cql_query(self, query, compression):
-    self._oprot.writeMessageBegin('prepare_cql_query', TMessageType.CALL, self._seqid)
-    args = prepare_cql_query_args()
-    args.query = query
-    args.compression = compression
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_prepare_cql_query(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = prepare_cql_query_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "prepare_cql_query failed: unknown result");
-
-  def prepare_cql3_query(self, query, compression):
-    """
-    Prepare a CQL3 (Cassandra Query Language) statement by compiling and returning
-    - the type of CQL statement
-    - an id token of the compiled CQL stored on the server side.
-    - a count of the discovered bound markers in the statement
-
-    Parameters:
-     - query
-     - compression
-    """
-    self.send_prepare_cql3_query(query, compression)
-    return self.recv_prepare_cql3_query()
-
-  def send_prepare_cql3_query(self, query, compression):
-    self._oprot.writeMessageBegin('prepare_cql3_query', TMessageType.CALL, self._seqid)
-    args = prepare_cql3_query_args()
-    args.query = query
-    args.compression = compression
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_prepare_cql3_query(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = prepare_cql3_query_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "prepare_cql3_query failed: unknown result");
-
-  def execute_prepared_cql_query(self, itemId, values):
-    """
-    @deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead.
-
-    Parameters:
-     - itemId
-     - values
-    """
-    self.send_execute_prepared_cql_query(itemId, values)
-    return self.recv_execute_prepared_cql_query()
-
-  def send_execute_prepared_cql_query(self, itemId, values):
-    self._oprot.writeMessageBegin('execute_prepared_cql_query', TMessageType.CALL, self._seqid)
-    args = execute_prepared_cql_query_args()
-    args.itemId = itemId
-    args.values = values
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_execute_prepared_cql_query(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = execute_prepared_cql_query_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    if result.ue is not None:
-      raise result.ue
-    if result.te is not None:
-      raise result.te
-    if result.sde is not None:
-      raise result.sde
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "execute_prepared_cql_query failed: unknown result");
-
-  def execute_prepared_cql3_query(self, itemId, values, consistency):
-    """
-    Executes a prepared CQL3 (Cassandra Query Language) statement by passing an id token, a list of variables
-    to bind, and the consistency level, and returns a CqlResult containing the results.
-
-    Parameters:
-     - itemId
-     - values
-     - consistency
-    """
-    self.send_execute_prepared_cql3_query(itemId, values, consistency)
-    return self.recv_execute_prepared_cql3_query()
-
-  def send_execute_prepared_cql3_query(self, itemId, values, consistency):
-    self._oprot.writeMessageBegin('execute_prepared_cql3_query', TMessageType.CALL, self._seqid)
-    args = execute_prepared_cql3_query_args()
-    args.itemId = itemId
-    args.values = values
-    args.consistency = consistency
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_execute_prepared_cql3_query(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = execute_prepared_cql3_query_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    if result.ire is not None:
-      raise result.ire
-    if result.ue is not None:
-      raise result.ue
-    if result.te is not None:
-      raise result.te
-    if result.sde is not None:
-      raise result.sde
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "execute_prepared_cql3_query failed: unknown result");
-
-  def set_cql_version(self, version):
-    """
-    @deprecated This is now a no-op. Please use the CQL3 specific methods instead.
-
-    Parameters:
-     - version
-    """
-    self.send_set_cql_version(version)
-    self.recv_set_cql_version()
-
-  def send_set_cql_version(self, version):
-    self._oprot.writeMessageBegin('set_cql_version', TMessageType.CALL, self._seqid)
-    args = set_cql_version_args()
-    args.version = version
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_set_cql_version(self, ):
-    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(self._iprot)
-      self._iprot.readMessageEnd()
-      raise x
-    result = set_cql_version_result()
-    result.read(self._iprot)
-    self._iprot.readMessageEnd()
-    if result.ire is not None:
-      raise result.ire
-    return
-
-
-class Processor(Iface, TProcessor):
-  def __init__(self, handler):
-    self._handler = handler
-    self._processMap = {}
-    self._processMap["login"] = Processor.process_login
-    self._processMap["set_keyspace"] = Processor.process_set_keyspace
-    self._processMap["get"] = Processor.process_get
-    self._processMap["get_slice"] = Processor.process_get_slice
-    self._processMap["get_count"] = Processor.process_get_count
-    self._processMap["multiget_slice"] = Processor.process_multiget_slice
-    self._processMap["multiget_count"] = Processor.process_multiget_count
-    self._processMap["get_range_slices"] = Processor.process_get_range_slices
-    self._processMap["get_paged_slice"] = Processor.process_get_paged_slice
-    self._processMap["get_indexed_slices"] = Processor.process_get_indexed_slices
-    self._processMap["insert"] = Processor.process_insert
-    self._processMap["add"] = Processor.process_add
-    self._processMap["cas"] = Processor.process_cas
-    self._processMap["remove"] = Processor.process_remove
-    self._processMap["remove_counter"] = Processor.process_remove_counter
-    self._processMap["batch_mutate"] = Processor.process_batch_mutate
-    self._processMap["atomic_batch_mutate"] = Processor.process_atomic_batch_mutate
-    self._processMap["truncate"] = Processor.process_truncate
-    self._processMap["get_multi_slice"] = Processor.process_get_multi_slice
-    self._processMap["describe_schema_versions"] = Processor.process_describe_schema_versions
-    self._processMap["describe_keyspaces"] = Processor.process_describe_keyspaces
-    self._processMap["describe_cluster_name"] = Processor.process_describe_cluster_name
-    self._processMap["describe_version"] = Processor.process_describe_version
-    self._processMap["describe_ring"] = Processor.process_describe_ring
-    self._processMap["describe_local_ring"] = Processor.process_describe_local_ring
-    self._processMap["describe_token_map"] = Processor.process_describe_token_map
-    self._processMap["describe_partitioner"] = Processor.process_describe_partitioner
-    self._processMap["describe_snitch"] = Processor.process_describe_snitch
-    self._processMap["describe_keyspace"] = Processor.process_describe_keyspace
-    self._processMap["describe_splits"] = Processor.process_describe_splits
-    self._processMap["trace_next_query"] = Processor.process_trace_next_query
-    self._processMap["describe_splits_ex"] = Processor.process_describe_splits_ex
-    self._processMap["system_add_column_family"] = Processor.process_system_add_column_family
-    self._processMap["system_drop_column_family"] = Processor.process_system_drop_column_family
-    self._processMap["system_add_keyspace"] = Processor.process_system_add_keyspace
-    self._processMap["system_drop_keyspace"] = Processor.process_system_drop_keyspace
-    self._processMap["system_update_keyspace"] = Processor.process_system_update_keyspace
-    self._processMap["system_update_column_family"] = Processor.process_system_update_column_family
-    self._processMap["execute_cql_query"] = Processor.process_execute_cql_query
-    self._processMap["execute_cql3_query"] = Processor.process_execute_cql3_query
-    self._processMap["prepare_cql_query"] = Processor.process_prepare_cql_query
-    self._processMap["prepare_cql3_query"] = Processor.process_prepare_cql3_query
-    self._processMap["execute_prepared_cql_query"] = Processor.process_execute_prepared_cql_query
-    self._processMap["execute_prepared_cql3_query"] = Processor.process_execute_prepared_cql3_query
-    self._processMap["set_cql_version"] = Processor.process_set_cql_version
-
-  def process(self, iprot, oprot):
-    (name, type, seqid) = iprot.readMessageBegin()
-    if name not in self._processMap:
-      iprot.skip(TType.STRUCT)
-      iprot.readMessageEnd()
-      x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
-      oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
-      x.write(oprot)
-      oprot.writeMessageEnd()
-      oprot.trans.flush()
-      return
-    else:
-      self._processMap[name](self, seqid, iprot, oprot)
-    return True
-
-  def process_login(self, seqid, iprot, oprot):
-    args = login_args()
-    args.read(iprot)
-    iprot.readMessageEnd()
-    result = login_result()
-    try:
-      self._handler.login(args.auth_request)
-    except AuthenticationException as authnx:
-      result.authnx = authnx
-    except AuthorizationException as authzx:
-      result.authzx = authzx
-    oprot.writeMessageBegin("login", TMessageType.REPLY, seqid)
-    result.write(oprot)
-    oprot.writeMessageEnd()
-    oprot.trans.flush()
-
-  def process_set_keyspace(self, seqid, iprot, oprot):
-    args = set_keyspace_args()
-    args.read(iprot)
-    iprot.readMessageEnd()
-    result = set_keyspace_result()
-    try:
-      self._handler.set_keyspace(args.keyspace)
-    except InvalidRequestException as ire:
-      result.ire = ire
-    oprot.writeMessageBegin("set_keyspace", TMessageType.REPLY, seqid)
-    result.write(oprot)
-    oprot.writeMessageEnd()
-    oprot.trans.flush()
-
-  def process_get(self, seqid, iprot, oprot):
-    args = get_args()
-    args.read(iprot)
-    iprot.readMessageEnd()
-    result = get_result()
-    try:
-      result.success = self._handler.get(args.key, args.column_path, args.consistency_level)
-    except InvalidRequestException as ire:
-      result.ire = ire
-    except NotFoundException as nfe:
-      result.nfe = nfe
-    except UnavailableException as ue:
-      result.ue = ue
-    except TimedOutException as te:
-      result.te = te
-    oprot.writeMessageBegin("get", TMessageType.REPLY, seqid)
-    result.write(oprot)
-    oprot.writeMessageEnd()
-    oprot.trans.flush()
-
-  def process_get_slice(self, seqid, iprot, oprot):
-    args = get_slice_args()
-    args.read(iprot)
-    iprot.readMessageEnd()
-    result = get_slice_result()
-    try:
-      result.success = self._handler.get_slice(args.key, args.column_parent, args.predicate, args.consistency_level)
-    except InvalidRequestException as ire:
-      result.ire = ire
-    except UnavailableException as ue:
-      result.ue = ue
-    except TimedOutException as te:
-      result.te = te
-    oprot.writeMessageBegin("get_slice", TMessageType.REPLY, seqid)
-    result.write(oprot)
-    oprot.writeMessageEnd()
-    oprot.trans.flush()
-
-  def process_get_count(self, seqid, iprot, oprot):
-    args = get_count_args()
-    args.read(iprot)
-    iprot.readMessageEnd()
-    result = get_count_result()
-    try:
-      result.success = self._handler.get_count(args.key, args.column_parent, args.predicate, args.consistency_level)
-    except InvalidRequestException as ire:
-      result.ire = ire
-    except UnavailableException as ue:
-      result.ue = ue
-    except TimedOutException as te:
-      result.te = te
-    oprot.writeMessageBegin("get_count", TMessageType.REPLY, seqid)
-    result.write(oprot)
-    oprot.writeMessageEnd()
-    oprot.trans.flush()
-
-  def process_multiget_slice(self, seqid, iprot, oprot):
-    args = multiget_slice_args()
-    args.read(iprot)
-    iprot.readMessageEnd()
-    result = multiget_slice_result()
-    try:
-      result.success = self._handler.multiget_slice(args.keys, args.column_parent, args.predicate, args.consistency_level)
-    except InvalidRequestException as ire:
-      result.ire = ire
-    except UnavailableException as ue:
-      result.ue = ue
-    except TimedOutException as te:
-      result.te = te
-    oprot.writeMessageBegin("multiget_slice", TMessageType.REPLY, seqid)
-    result.write(oprot)
-    oprot.writeMessageEnd()
-    oprot.trans.flush()
-
-  def process_multiget_count(self, seqid, iprot, oprot):
-    args = multiget_count_args()
-    args.read(iprot)
-    iprot.readMessageEnd()
-    result = multiget_count_result()
-    try:
-      result.success = self._handler.multiget_count(args.keys, args.column_parent, args.predicate, args.consistency_level)
-    except InvalidRequestException as ire:
-      result.ire = ire
-    except UnavailableException as ue:
-      result.ue = ue
-    except TimedOutException as te:
-      result.te = te
-    oprot.writeMessageBegin("multiget_count", TMessageType.REPLY, seqid)
-    result.write(oprot)
-    oprot.writeMessageEnd()
-    oprot.trans.flush()
-
-  def process_get_range_slices(self, seqid, iprot, oprot):
-    args = get_range_slices_args()
-    args.read(iprot)
-    iprot.readMessageEnd()
-    result = get_range_slices_result()
-    try:
-      result.success = self._handler.get_range_slices(args.column_parent, args.predicate, args.range, args.consistency_level)
-    except InvalidRequestException as ire:
-      result.ire = ire
-    except UnavailableException as ue:
-      result.ue = ue
-    except TimedOutException as te:
-      result.te = te
-    oprot.writeMessageBegin("get_range_slices", TMessageType.REPLY, seqid)
-    result.write(oprot)
-    oprot.writeMessageEnd()
-    oprot.trans.flush()
-
-  def process_get_paged_slice(self, seqid, iprot, oprot):
-    args = get_paged_slice_args()
-    args.read(iprot)
-    iprot.readMessageEnd()
-    result = get_paged_slice_result()
-    try:
-      result.success = self._handler.get_paged_slice(args.column_family, args.range, args.start_column, args.consistency_level)
-    except InvalidRequestException as ire:
-      result.ire = ire
-    except UnavailableException as ue:
-      result.ue = ue
-    except TimedOutException as te:
-      result.te = te
-    oprot.writeMessageBegin("get_paged_slice", TMessageType.REPLY, seqid)
-    result.write(oprot)
-    oprot.writeMessageEnd()
-    oprot.trans.flush()
-
-  def process_get_indexed_slices(self, seqid, iprot, oprot):
-    args = get_indexed_slices_args()
-    args.read(iprot)
-    iprot.readMessageEnd()
-    result = get_indexed_slices_result()
-    try:
-      result.success = self._handler.get_indexed_slices(args.column_parent, args.index_clause, args.column_predicate, args.consistency_level)
-    except InvalidRequestException as ire:
-      result.ire = ire
-    except UnavailableException as ue:
-      result.ue = ue
-    except TimedOutException as te:
-      result.te = te
-    oprot.writeMessageBegin("get_indexed_slices", TMessageType.REPLY, seqid)
-    result.write(oprot)
-    oprot.writeMessageEnd()
-    oprot.trans.flush()
-
-  def process_insert(self, seqid, iprot, oprot):
-    args = insert_args()
-    args.read(iprot)
-    iprot.readMessageEnd()
-    result = insert_result()
-    try:
-      self._handler.insert(args.key, args.column_parent, args.column, args.consistency_level)
-    except InvalidRequestException as ire:
-      result.ire = ire
-    except UnavailableException as ue:
-      result.ue = ue
-    except TimedOutException as te:
-      result.te = te
-    oprot.writeMessageBegin("insert", TMessageType.REPLY, seqid)
-    result.write(oprot)
-    oprot.writeMessageEnd()
-    oprot.trans.flush()
-
-  def process_add(self, seqid, iprot, oprot):
-    args = add_args()
-    args.read(iprot)
-    iprot.readMessageEnd()
-    result = add_result()
-    try:
-      self._handler.add(args.key, args.column_parent, args.column, args.consistency_level)
-    except InvalidRequestException as ire:
-      result.ire = ire
-    except UnavailableException as ue:
-      result.ue = ue
-    except TimedOutException as te:
-      result.te = te
-    oprot.writeMessageBegin("add", TMessageType.REPLY, seqid)
-    result.write(oprot)
-    oprot.writeMessageEnd()
-    oprot.trans.flush()
-
-  def process_cas(self, seqid, iprot, oprot):
-    args = cas_args()
-    args.read(iprot)
-    iprot.readMessageEnd()
-    result = cas_result()
-    try:
-      result.success = self._handler.cas(args.key, args.column_family, args.expected, args.updates, args.serial_consistency_level, args.commit_consistency_level)
-    except InvalidRequestException as ire:
-      result.ire = ire
-    except UnavailableException as ue:
-      result.ue = ue
-    except TimedOutException as te:
-      result.te = te
-    oprot.writeMessageBegin("cas", TMessageType.REPLY, seqid)
-    result.write(oprot)
-    oprot.writeMessageEnd()
-    oprot.trans.flush()
-
-  def process_remove(self, seqid, iprot, oprot):
-    args = remove_args()
-    args.read(iprot)
-    iprot.readMessageEnd()
-    result = remove_result()
-    try:
-      self._handler.remove(args.key, args.column_path, args.timestamp, args.consistency_level)
-    except InvalidRequestException as ire:
-      result.ire = ire
-    except UnavailableException as ue:
-      result.ue = ue
-    except TimedOutException as te:
-      result.te = te
-    oprot.writeMessageBegin("remove", TMessageType.REPLY, seqid)
-    result.write(oprot)
-    oprot.writeMessageEnd()
-    oprot.trans.flush()
-
-  def process_remove_counter(self, seqid, iprot, oprot):
-    args = remove_counter_args()
-    args.read(iprot)
-    iprot.readMessageEnd()
-    result = remove_counter_result()
-    try:
-      self._handler.remove_counter(args.key, args.path, args.consistency_level)
-    except InvalidRequestException as ire:
-      result.ire = ire
-    except UnavailableException as ue:
-      result.ue = ue
-    except TimedOutException as te:
-      result.te = te
-    oprot.writeMessageBegin("remove_counter", TMessageType.REPLY, seqid)
-    result.write(oprot)
-    oprot.writeMessageEnd()
-    oprot.trans.flush()
-
-  def process_batch_mutate(self, seqid, iprot, oprot):
-    args = batch_mutate_args()
-    args.read(iprot)
-    iprot.readMessageEnd()
-    result = batch_mutate_result()
-    try:
-      self._handler.batch_mutate(args.mutation_map, args.consistency_level)
-    except InvalidRequestException as ire:
-      result.ire = ire
-    except UnavailableException as ue:
-      result.ue = ue
-    except TimedOutException as te:
-      result.te = te
-    oprot.writeMessageBegin("batch_mutate", TMessageType.REPLY, seqid)
-    result.write(oprot)
-    oprot.writeMessageEnd()
-    oprot.trans.flush()
-
-  def process_atomic_batch_mutate(self, seqid, iprot, oprot):
-    args = atomic_batch_mutate_args()
-    args.read(iprot)
-    iprot.readMessageEnd()
-    result = atomic_batch_mutate_result()
-    try:
-      self._handler.atomic_batch_mutate(args.mutation_map, args.consistency_level)
-    except InvalidRequestException as ire:
-      result.ire = ire
-    except UnavailableException as ue:
-      result.ue = ue
-    ex

<TRUNCATED>
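
For reference, the generated client removed above follows Thrift's blocking
send_*/recv_* convention: each RPC writes an args struct, flushes the transport,
then reads a result struct back and raises whichever exception field is set.
A minimal sketch of driving it, assuming a node listening on the default Thrift
port 9160 and the v22 bindings that are deleted below:

    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    from thrift_bindings.v22 import Cassandra
    from thrift_bindings.v22.ttypes import Compression, ConsistencyLevel

    # Cassandra's Thrift interface expects a framed transport
    transport = TTransport.TFramedTransport(TSocket.TSocket('127.0.0.1', 9160))
    client = Cassandra.Client(TBinaryProtocol.TBinaryProtocol(transport))
    transport.open()
    # execute_cql3_query sends the call, then blocks in recv_execute_cql3_query
    rows = client.execute_cql3_query(b'SELECT * FROM system.local',
                                     Compression.NONE, ConsistencyLevel.ONE)
    # prepare_cql3_query returns an itemId consumed by execute_prepared_cql3_query
    prepared = client.prepare_cql3_query(b'SELECT * FROM system.local', Compression.NONE)
    rows = client.execute_prepared_cql3_query(prepared.itemId, [], ConsistencyLevel.ONE)
    transport.close()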
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/thrift_bindings/v22/__init__.py
----------------------------------------------------------------------
diff --git a/thrift_bindings/v22/__init__.py b/thrift_bindings/v22/__init__.py
deleted file mode 100644
index 2132df0..0000000
--- a/thrift_bindings/v22/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__all__ = ['ttypes', 'constants', 'Cassandra']




[27/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/dtest.py
----------------------------------------------------------------------
diff --git a/dtest.py b/dtest.py
index 25c52e3..aad9e58 100644
--- a/dtest.py
+++ b/dtest.py
@@ -1,50 +1,30 @@
-from __future__ import with_statement
-
-import ConfigParser
+import configparser
 import copy
-import errno
-import glob
 import logging
 import os
-import pprint
 import re
-import shutil
-import signal
 import subprocess
 import sys
-import tempfile
-import thread
 import threading
 import time
 import traceback
-import types
-import unittest.case
-from collections import OrderedDict
-from subprocess import CalledProcessError
-from unittest import TestCase
-
+import pytest
 import cassandra
 import ccmlib.repository
-from cassandra import ConsistencyLevel
+
+from subprocess import CalledProcessError
+
+from flaky import flaky
+
+from cassandra import ConsistencyLevel, OperationTimedOut
 from cassandra.auth import PlainTextAuthProvider
-from cassandra.cluster import Cluster as PyCluster
-from cassandra.cluster import NoHostAvailable
-from cassandra.cluster import ExecutionProfile, EXEC_PROFILE_DEFAULT
-from cassandra.policies import RetryPolicy, WhiteListRoundRobinPolicy
-from ccmlib.cluster import Cluster
-from ccmlib.cluster_factory import ClusterFactory
+from cassandra.cluster import ExecutionProfile
+from cassandra.policies import RetryPolicy, RoundRobinPolicy
 from ccmlib.common import get_version_from_build, is_win
+from ccmlib.node import ToolError, TimeoutError
 from distutils.version import LooseVersion
-from nose.exc import SkipTest
-from nose.tools import assert_greater_equal
-from six import print_
+from tools.misc import retry_till_success
 
-from plugins.dtestconfig import _CONFIG as CONFIG
-# We don't want test files to know about the plugins module, so we import
-# constants here and re-export them.
-from plugins.dtestconfig import GlobalConfigObject
-from tools.context import log_filter
-from tools.funcutils import merge_dicts
 
 LOG_SAVED_DIR = "logs"
 try:
@@ -57,52 +37,14 @@ LAST_LOG = os.path.join(LOG_SAVED_DIR, "last")
 LAST_TEST_DIR = 'last_test_dir'
 
 DEFAULT_DIR = './'
-config = ConfigParser.RawConfigParser()
+config = configparser.RawConfigParser()
 if len(config.read(os.path.expanduser('~/.cassandra-dtest'))) > 0:
     if config.has_option('main', 'default_dir'):
         DEFAULT_DIR = os.path.expanduser(config.get('main', 'default_dir'))
-CASSANDRA_DIR = os.environ.get('CASSANDRA_DIR', DEFAULT_DIR)
-
-NO_SKIP = os.environ.get('SKIP', '').lower() in ('no', 'false')
-DEBUG = os.environ.get('DEBUG', '').lower() in ('yes', 'true')
-TRACE = os.environ.get('TRACE', '').lower() in ('yes', 'true')
-KEEP_LOGS = os.environ.get('KEEP_LOGS', '').lower() in ('yes', 'true')
-KEEP_TEST_DIR = os.environ.get('KEEP_TEST_DIR', '').lower() in ('yes', 'true')
-PRINT_DEBUG = os.environ.get('PRINT_DEBUG', '').lower() in ('yes', 'true')
-OFFHEAP_MEMTABLES = os.environ.get('OFFHEAP_MEMTABLES', '').lower() in ('yes', 'true')
-NUM_TOKENS = os.environ.get('NUM_TOKENS', '256')
-RECORD_COVERAGE = os.environ.get('RECORD_COVERAGE', '').lower() in ('yes', 'true')
-IGNORE_REQUIRE = os.environ.get('IGNORE_REQUIRE', '').lower() in ('yes', 'true')
-DATADIR_COUNT = os.environ.get('DATADIR_COUNT', '3')
-ENABLE_ACTIVE_LOG_WATCHING = os.environ.get('ENABLE_ACTIVE_LOG_WATCHING', '').lower() in ('yes', 'true')
-RUN_STATIC_UPGRADE_MATRIX = os.environ.get('RUN_STATIC_UPGRADE_MATRIX', '').lower() in ('yes', 'true')
-
-# devault values for configuration from configuration plugin
-_default_config = GlobalConfigObject(
-    vnodes=True,
-)
-
-if CONFIG is None:
-    CONFIG = _default_config
 
-DISABLE_VNODES = not CONFIG.vnodes
-
-
-if os.environ.get('DISABLE_VNODES', '').lower() in ('yes', 'true'):
-    print 'DISABLE_VNODES environment variable deprecated. Use `./run_dtests.py --vnodes false` instead.'
-
-
-CURRENT_TEST = ""
-
-logging.basicConfig(filename=os.path.join(LOG_SAVED_DIR, "dtest.log"),
-                    filemode='w',
-                    format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
-                    datefmt='%H:%M:%S',
-                    level=logging.DEBUG)
+RUN_STATIC_UPGRADE_MATRIX = os.environ.get('RUN_STATIC_UPGRADE_MATRIX', '').lower() in ('yes', 'true')
 
-LOG = logging.getLogger('dtest')
-# set python-driver log level to INFO by default for dtest
-logging.getLogger('cassandra').setLevel(logging.INFO)
+logger = logging.getLogger(__name__)
 
 
 def get_sha(repo_dir):
@@ -111,11 +53,13 @@ def get_sha(repo_dir):
         prefix = 'github:apache/'
         local_repo_location = os.environ.get('LOCAL_GIT_REPO')
         if local_repo_location is not None:
-            prefix = 'local:{}:'.format(local_repo_location)  # local: slugs take the form 'local:/some/path/to/cassandra/:branch_name_or_sha'
+            prefix = 'local:{}:'.format(local_repo_location)
+            # local: slugs take the form 'local:/some/path/to/cassandra/:branch_name_or_sha'
         return "{}{}".format(prefix, output)
     except CalledProcessError as e:
-        if re.search('Not a git repository', e.message) is not None:
-            # we tried to get a sha, but repo_dir isn't a git repo. No big deal, must just be working from a non-git install.
+        if re.search('Not a git repository', str(e)) is not None:
+            # we tried to get a sha, but repo_dir isn't a git repo. No big deal, must just be
+            # working from a non-git install.
             return None
         else:
             # git call failed for some unknown reason
@@ -137,33 +81,12 @@ if _cassandra_version_slug:
     CASSANDRA_VERSION_FROM_BUILD = get_version_from_build(ccm_repo_cache_dir)
     CASSANDRA_GITREF = get_sha(ccm_repo_cache_dir)  # will be set None when not a git repo
 else:
-    CASSANDRA_VERSION_FROM_BUILD = get_version_from_build(CASSANDRA_DIR)
-    CASSANDRA_GITREF = get_sha(CASSANDRA_DIR)
-
+    CASSANDRA_VERSION_FROM_BUILD = LooseVersion("4.0") # todo kjkjkj
+    CASSANDRA_GITREF = ""
+    #CASSANDRA_VERSION_FROM_BUILD = get_version_from_build(self.dtest_config.cassandra_dir)
+    #CASSANDRA_GITREF = get_sha(dtest_config.cassandra_dir)
 
-# Determine the location of the libjemalloc jar so that we can specify it
-# through environment variables when start Cassandra.  This reduces startup
-# time, making the dtests run faster.
-def find_libjemalloc():
-    if is_win():
-        # let the normal bat script handle finding libjemalloc
-        return ""
 
-    this_dir = os.path.dirname(os.path.realpath(__file__))
-    script = os.path.join(this_dir, "findlibjemalloc.sh")
-    try:
-        p = subprocess.Popen([script], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        stdout, stderr = p.communicate()
-        if stderr or not stdout:
-            return "-"  # tells C* not to look for libjemalloc
-        else:
-            return stdout
-    except Exception as exc:
-        print "Failed to run script to prelocate libjemalloc ({}): {}".format(script, exc)
-        return ""
-
-
-CASSANDRA_LIBJEMALLOC = find_libjemalloc()
 # copy the initial environment variables so we can reset them later:
 initial_environment = copy.deepcopy(os.environ)
 
@@ -172,40 +95,7 @@ class DtestTimeoutError(Exception):
     pass
 
 
-def reset_environment_vars():
-    os.environ.clear()
-    os.environ.update(initial_environment)
-
-
-def warning(msg):
-    LOG.warning("{} - {}".format(CURRENT_TEST, msg))
-    if PRINT_DEBUG:
-        print "WARN: " + msg
-
-
-def debug(msg):
-    LOG.debug("{} - {}".format(CURRENT_TEST, msg))
-    if PRINT_DEBUG:
-        print msg
-
-
-debug("Python driver version in use: {}".format(cassandra.__version__))
-
-
-def retry_till_success(fun, *args, **kwargs):
-    timeout = kwargs.pop('timeout', 60)
-    bypassed_exception = kwargs.pop('bypassed_exception', Exception)
-
-    deadline = time.time() + timeout
-    while True:
-        try:
-            return fun(*args, **kwargs)
-        except bypassed_exception:
-            if time.time() > deadline:
-                raise
-            else:
-                # brief pause before next attempt
-                time.sleep(0.25)
+logger.debug("Python driver version in use: {}".format(cassandra.__version__))
 
 
 class FlakyRetryPolicy(RetryPolicy):
@@ -219,21 +109,21 @@ class FlakyRetryPolicy(RetryPolicy):
 
     def on_read_timeout(self, *args, **kwargs):
         if kwargs['retry_num'] < self.max_retries:
-            debug("Retrying read after timeout. Attempt #" + str(kwargs['retry_num']))
+            logger.debug("Retrying read after timeout. Attempt #" + str(kwargs['retry_num']))
             return (self.RETRY, None)
         else:
             return (self.RETHROW, None)
 
     def on_write_timeout(self, *args, **kwargs):
         if kwargs['retry_num'] < self.max_retries:
-            debug("Retrying write after timeout. Attempt #" + str(kwargs['retry_num']))
+            logger.debug("Retrying write after timeout. Attempt #" + str(kwargs['retry_num']))
             return (self.RETRY, None)
         else:
             return (self.RETHROW, None)
 
     def on_unavailable(self, *args, **kwargs):
         if kwargs['retry_num'] < self.max_retries:
-            debug("Retrying request after UE. Attempt #" + str(kwargs['retry_num']))
+            logger.debug("Retrying request after UE. Attempt #" + str(kwargs['retry_num']))
             return (self.RETRY, None)
         else:
             return (self.RETHROW, None)
@@ -261,8 +151,18 @@ class Runner(threading.Thread):
             i = i + 1
 
     def stop(self):
+        if self.__stopped:
+            return
+
         self.__stopped = True
-        self.join()
+        # pytest may appear to hang forever waiting for cluster teardown. Are all driver session objects shut down?
+        # to debug hang you can add the following at the top of the test
+        #     import faulthandler
+        #     faulthandler.enable()
+        #
+        # and then when the hang occurs send a SIGABRT to the pytest process (e.g. kill -SIGABRT <pytest_pid>)
+        # this will print a python thread dump of all currently alive threads
+        self.join(timeout=30)
         if self.__error is not None:
             raise self.__error
 
@@ -272,344 +172,103 @@ class Runner(threading.Thread):
 
 
 def make_execution_profile(retry_policy=FlakyRetryPolicy(), consistency_level=ConsistencyLevel.ONE, **kwargs):
-    return ExecutionProfile(retry_policy=retry_policy,
-                            consistency_level=consistency_level,
-                            **kwargs)
-
-
-class Tester(TestCase):
-
-    maxDiff = None
-    allow_log_errors = False  # scan the log of each node for errors after every test.
-    cluster_options = None
-
-    def set_node_to_current_version(self, node):
-        version = os.environ.get('CASSANDRA_VERSION')
-        cdir = CASSANDRA_DIR
-
-        if version:
-            node.set_install_dir(version=version)
-        else:
-            node.set_install_dir(install_dir=cdir)
-
-    def init_config(self):
-        init_default_config(self.cluster, self.cluster_options)
-
-    def setUp(self):
-        self.set_current_tst_name()
-        kill_windows_cassandra_procs()
-        maybe_cleanup_cluster_from_last_test_file()
-
-        self.test_path = get_test_path()
-        self.cluster = create_ccm_cluster(self.test_path, name='test')
-
-        self.maybe_begin_active_log_watch()
-        maybe_setup_jacoco(self.test_path)
-
-        self.init_config()
-        write_last_test_file(self.test_path, self.cluster)
-
-        set_log_levels(self.cluster)
-        self.connections = []
-        self.runners = []
-
-    # this is intentionally spelled 'tst' instead of 'test' to avoid
-    # making unittest think it's a test method
-    def set_current_tst_name(self):
-        global CURRENT_TEST
-        CURRENT_TEST = self.id()
-
-    def maybe_begin_active_log_watch(self):
-        if ENABLE_ACTIVE_LOG_WATCHING:
-            if not self.allow_log_errors:
-                self.begin_active_log_watch()
-
-    def begin_active_log_watch(self):
-        """
-        Calls into ccm to start actively watching logs.
-
-        In the event that errors are seen in logs, ccm will call back to _log_error_handler.
-
-        When the cluster is no longer in use, stop_active_log_watch should be called to end log watching.
-        (otherwise a 'daemon' thread will (needlessly) run until the process exits).
-        """
-        # log watching happens in another thread, but we want it to halt the main
-        # thread's execution, which we have to do by registering a signal handler
-        signal.signal(signal.SIGINT, self._catch_interrupt)
-        self._log_watch_thread = self.cluster.actively_watch_logs_for_error(self._log_error_handler, interval=0.25)
-
-    def _log_error_handler(self, errordata):
-        """
-        Callback handler used in conjunction with begin_active_log_watch.
-        When called, prepares exception instance, then will indirectly
-        cause _catch_interrupt to be called, which can raise the exception in the main
-        program thread.
-
-        @param errordata is a dictonary mapping node name to failure list.
-        """
-        # in some cases self.allow_log_errors may get set after proactive log checking has been enabled
-        # so we need to double-check first thing before proceeding
-        if self.allow_log_errors:
-            return
-
-        reportable_errordata = OrderedDict()
-
-        for nodename, errors in errordata.items():
-            filtered_errors = list(self.__filter_errors(['\n'.join(msg) for msg in errors]))
-            if len(filtered_errors) is not 0:
-                reportable_errordata[nodename] = filtered_errors
+    if 'load_balancing_policy' in kwargs:
+        return ExecutionProfile(retry_policy=retry_policy,
+                                consistency_level=consistency_level,
+                                **kwargs)
+    else:
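+        # no load_balancing_policy supplied: default to RoundRobinPolicy across all hosts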
+        return ExecutionProfile(retry_policy=retry_policy,
+                                consistency_level=consistency_level,
+                                load_balancing_policy=RoundRobinPolicy(),
+                                **kwargs)
 
-        # no errors worthy of halting the test
-        if not reportable_errordata:
-            return
 
-        message = "Errors seen in logs for: {nodes}".format(nodes=", ".join(reportable_errordata.keys()))
-        for nodename, errors in reportable_errordata.items():
-            for error in errors:
-                message += "\n{nodename}: {error}".format(nodename=nodename, error=error)
+def running_in_docker():
+    return os.path.isfile('/.dockerenv')
 
-        try:
-            debug('Errors were just seen in logs, ending test (if not ending already)!')
-            print_("Error details: \n{message}".format(message=message))
-            self.test_is_ending  # will raise AttributeError if not present
-        except AttributeError:
-            self.test_is_ending = True
-            self.exit_with_exception = AssertionError("Log error encountered during active log scanning, see stdout")
-            # thread.interrupt_main will SIGINT in the main thread, which we can
-            # catch to raise an exception with useful information
-            thread.interrupt_main()
 
+def cleanup_docker_environment_before_test_execution():
+    """
+    perform a bunch of system cleanup operations, like killing any instances that might be
+    hanging around incorrectly from a previous run, syncing the disk, and clearing swap.
+    Ideally we would also drop the page cache, but as docker isn't running in privileged
+    mode there is no way for us to do this.
     """
-    Finds files matching the glob pattern specified as argument on
-    the given keyspace in all nodes
+    # attempt to whack all existing running Cassandra processes forcefully to get us into a clean state
+    p_kill = subprocess.Popen('ps aux | grep -ie CassandraDaemon | grep java | awk \'{print $2}\' | xargs kill -9',
+                              shell=True)
+    p_kill.wait(timeout=10)
+
+    # explicitly call "sync" to flush everything that might be pending from a previous test
+    # so tests are less likely to hit a very slow fsync during the test by starting from a 'known' state
+    # note: to mitigate this further the docker image is mounting /tmp as a volume, which gives
+    # us an ext4 mount which should talk directly to the underlying device on the host, skipping
+    # the aufs pain that we get with anything else running in the docker image. Originally,
+    # I had a timeout of 120 seconds (2 minutes), then 300 seconds (5 minutes), but sync was still occasionally timing out.
+    p_sync = subprocess.Popen('sudo /bin/sync', shell=True)
+    p_sync.wait(timeout=600)
+
+    # turn swap off and back on to make sure it's fully cleared if anything happened to swap
+    # from a previous test run
+    p_swap = subprocess.Popen('sudo /sbin/swapoff -a && sudo /sbin/swapon -a', shell=True)
+    p_swap.wait(timeout=60)
+
+
+def test_failure_due_to_timeout(err, *args):
     """
+    check if we should rerun a test with the flaky plugin or not.
+    for now, only rerun if we failed the test for one of the following
+    three exceptions: cassandra.OperationTimedOut, ccm.node.ToolError,
+    and ccm.node.TimeoutError.
+
+    - cassandra.OperationTimedOut will be thrown when a CQL query made through
+    the python-driver times out.
+    - ccm.node.ToolError will be thrown when an invocation of a "tool" fails
+    (in the case of dtests this will almost always be an invocation of stress).
+    - ccm.node.TimeoutError will be thrown when a blocking ccm operation
+    on an individual node times out. In most cases this tends to be something
+    like watch_log_for hitting the timeout before the desired pattern is seen
+    in the node's logs.
+
+    if we failed for one of these reasons - and we're running in docker - run
+    the same "cleanup" logic we run before test execution and test setup begins,
+    and for good measure introduce a 2 second sleep. why 2 seconds? because it's
+    magic :) - ideally this gets the environment back into a good state and makes
+    the rerun of flaky tests likely to succeed if they failed in the first place
+    due to environmental issues.
+    """
+    if issubclass(err[0], OperationTimedOut) or issubclass(err[0], ToolError) or issubclass(err[0], TimeoutError):
+        if running_in_docker():
+            cleanup_docker_environment_before_test_execution()
+            time.sleep(2)
+        return True
+    else:
+        return False
 
-    def glob_data_dirs(self, path, ks="ks"):
-        result = []
-        for node in self.cluster.nodelist():
-            for data_dir in node.data_directories():
-                ks_dir = os.path.join(data_dir, ks, path)
-                result.extend(glob.glob(ks_dir))
-        return result
-
-    def _catch_interrupt(self, signal, frame):
-        """
-        Signal handler for registering on SIGINT.
-
-        If called will look for a stored exception and raise it to abort test.
-        If a stored exception is not present, this handler has likely caught a
-        user interrupt via CTRL-C, and will raise a KeyboardInterrupt.
-        """
-        try:
-            # check if we have a persisted exception to fail with
-            raise self.exit_with_exception
-        except AttributeError:
-            # looks like this was just a plain CTRL-C event
-            raise KeyboardInterrupt()
-
-    def copy_logs(self, cluster, directory=None, name=None):
-        """Copy the current cluster's log files somewhere, by default to LOG_SAVED_DIR with a name of 'last'"""
-        if directory is None:
-            directory = LOG_SAVED_DIR
-        if name is None:
-            name = LAST_LOG
-        else:
-            name = os.path.join(directory, name)
-        if not os.path.exists(directory):
-            os.mkdir(directory)
-        logs = [(node.name, node.logfilename(), node.debuglogfilename(), node.gclogfilename(), node.compactionlogfilename())
-                for node in self.cluster.nodes.values()]
-        if len(logs) is not 0:
-            basedir = str(int(time.time() * 1000)) + '_' + self.id()
-            logdir = os.path.join(directory, basedir)
-            os.mkdir(logdir)
-            for n, log, debuglog, gclog, compactionlog in logs:
-                if os.path.exists(log):
-                    self.assertGreaterEqual(os.path.getsize(log), 0)
-                    shutil.copyfile(log, os.path.join(logdir, n + ".log"))
-                if os.path.exists(debuglog):
-                    self.assertGreaterEqual(os.path.getsize(debuglog), 0)
-                    shutil.copyfile(debuglog, os.path.join(logdir, n + "_debug.log"))
-                if os.path.exists(gclog):
-                    self.assertGreaterEqual(os.path.getsize(gclog), 0)
-                    shutil.copyfile(gclog, os.path.join(logdir, n + "_gc.log"))
-                if os.path.exists(compactionlog):
-                    self.assertGreaterEqual(os.path.getsize(compactionlog), 0)
-                    shutil.copyfile(compactionlog, os.path.join(logdir, n + "_compaction.log"))
-            if os.path.exists(name):
-                os.unlink(name)
-            if not is_win():
-                os.symlink(basedir, name)
-
-    def cql_connection(self, node, keyspace=None, user=None,
-                       password=None, compression=True, protocol_version=None, port=None, ssl_opts=None, **kwargs):
-
-        return self._create_session(node, keyspace, user, password, compression,
-                                    protocol_version, port=port, ssl_opts=ssl_opts, **kwargs)
-
-    def exclusive_cql_connection(self, node, keyspace=None, user=None,
-                                 password=None, compression=True, protocol_version=None, port=None, ssl_opts=None, **kwargs):
-
-        node_ip = get_ip_from_node(node)
-        wlrr = WhiteListRoundRobinPolicy([node_ip])
-
-        return self._create_session(node, keyspace, user, password, compression,
-                                    protocol_version, port=port, ssl_opts=ssl_opts, load_balancing_policy=wlrr, **kwargs)
-
-    def _create_session(self, node, keyspace, user, password, compression, protocol_version,
-                        port=None, ssl_opts=None, execution_profiles=None, **kwargs):
-        node_ip = get_ip_from_node(node)
-        if not port:
-            port = get_port_from_node(node)
-
-        if protocol_version is None:
-            protocol_version = get_eager_protocol_version(node.cluster.version())
-
-        if user is not None:
-            auth_provider = get_auth_provider(user=user, password=password)
-        else:
-            auth_provider = None
-
-        profiles = {EXEC_PROFILE_DEFAULT: make_execution_profile(**kwargs)
-                    } if not execution_profiles else execution_profiles
-
-        cluster = PyCluster([node_ip],
-                            auth_provider=auth_provider,
-                            compression=compression,
-                            protocol_version=protocol_version,
-                            port=port,
-                            ssl_options=ssl_opts,
-                            connect_timeout=10,
-                            allow_beta_protocol_version=True,
-                            execution_profiles=profiles)
-        session = cluster.connect(wait_for_all_pools=True)
-
-        if keyspace is not None:
-            session.set_keyspace(keyspace)
-
-        self.connections.append(session)
-        return session
-
-    def patient_cql_connection(self, node, keyspace=None,
-                               user=None, password=None, timeout=30, compression=True,
-                               protocol_version=None, port=None, ssl_opts=None, **kwargs):
-        """
-        Returns a connection after it stops throwing NoHostAvailables due to not being ready.
-
-        If the timeout is exceeded, the exception is raised.
-        """
-        if is_win():
-            timeout *= 2
-
-        expected_log_lines = ('Control connection failed to connect, shutting down Cluster:', '[control connection] Error connecting to ')
-        with log_filter('cassandra.cluster', expected_log_lines):
-            session = retry_till_success(
-                self.cql_connection,
-                node,
-                keyspace=keyspace,
-                user=user,
-                password=password,
-                timeout=timeout,
-                compression=compression,
-                protocol_version=protocol_version,
-                port=port,
-                ssl_opts=ssl_opts,
-                bypassed_exception=NoHostAvailable,
-                **kwargs
-            )
-
-        return session
-
-    def patient_exclusive_cql_connection(self, node, keyspace=None,
-                                         user=None, password=None, timeout=30, compression=True,
-                                         protocol_version=None, port=None, ssl_opts=None, **kwargs):
-        """
-        Returns a connection after it stops throwing NoHostAvailables due to not being ready.
-
-        If the timeout is exceeded, the exception is raised.
-        """
-        if is_win():
-            timeout *= 2
-
-        return retry_till_success(
-            self.exclusive_cql_connection,
-            node,
-            keyspace=keyspace,
-            user=user,
-            password=password,
-            timeout=timeout,
-            compression=compression,
-            protocol_version=protocol_version,
-            port=port,
-            ssl_opts=ssl_opts,
-            bypassed_exception=NoHostAvailable,
-            **kwargs
-        )
-
-    @classmethod
-    def tearDownClass(cls):
-        reset_environment_vars()
-        if os.path.exists(LAST_TEST_DIR):
-            with open(LAST_TEST_DIR) as f:
-                test_path = f.readline().strip('\n')
-                name = f.readline()
-                try:
-                    cluster = ClusterFactory.load(test_path, name)
-                    # Avoid waiting too long for node to be marked down
-                    if KEEP_TEST_DIR:
-                        cluster.stop(gently=RECORD_COVERAGE)
-                    else:
-                        cluster.remove()
-                        os.rmdir(test_path)
-                except IOError:
-                    # after a restart, /tmp will be emptied so we'll get an IOError when loading the old cluster here
-                    pass
-            try:
-                os.remove(LAST_TEST_DIR)
-            except IOError:
-                # Ignore - see comment above
-                pass
 
-    def tearDown(self):
-        # test_is_ending prevents active log watching from being able to interrupt the test
-        # which we don't want to happen once tearDown begins
-        self.test_is_ending = True
+@flaky(rerun_filter=test_failure_due_to_timeout)
+class Tester:
 
-        reset_environment_vars()
+    def __getattribute__(self, name):
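+        # anything not found on the Tester instance itself falls through to the
+        # per-test fixture_dtest_setup, so legacy attribute accesses such as
+        # self.cluster keep working after the move off unittest.TestCase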
+        try:
+            return object.__getattribute__(self, name)
+        except AttributeError:
+            fixture_dtest_setup = object.__getattribute__(self, 'fixture_dtest_setup')
+            return object.__getattribute__(fixture_dtest_setup, name)
 
-        for con in self.connections:
-            con.cluster.shutdown()
+    @pytest.fixture(scope='function', autouse=True)
+    def set_dtest_setup_on_function(self, fixture_dtest_setup, fixture_dtest_config):
+        self.fixture_dtest_setup = fixture_dtest_setup
+        self.dtest_config = fixture_dtest_config
 
-        for runner in self.runners:
-            try:
-                runner.stop()
-            except Exception:
-                pass
+    def set_node_to_current_version(self, node):
+        version = os.environ.get('CASSANDRA_VERSION')
 
-        failed = did_fail()
-        try:
-            if not self.allow_log_errors and self.check_logs_for_errors():
-                failed = True
-                raise AssertionError('Unexpected error in log, see stdout')
-        finally:
-            try:
-                # save the logs for inspection
-                if failed or KEEP_LOGS:
-                    self.copy_logs(self.cluster)
-            except Exception as e:
-                print "Error saving log:", str(e)
-            finally:
-                log_watch_thread = getattr(self, '_log_watch_thread', None)
-                cleanup_cluster(self.cluster, self.test_path, log_watch_thread)
-
-    def check_logs_for_errors(self):
-        for node in self.cluster.nodelist():
-            errors = list(self.__filter_errors(
-                ['\n'.join(msg) for msg in node.grep_log_for_errors()]))
-            if len(errors) is not 0:
-                for error in errors:
-                    print_("Unexpected error in {node_name} log, error: \n{error}".format(node_name=node.name, error=error))
-                return True
+        if version:
+            node.set_install_dir(version=version)
+        else:
+            node.set_install_dir(install_dir=self.dtest_config.cassandra_dir)
+            os.environ['CASSANDRA_DIR'] = self.dtest_config.cassandra_dir
 
     def go(self, func):
         runner = Runner(func)
@@ -617,57 +276,6 @@ class Tester(TestCase):
         runner.start()
         return runner
 
-    def skip(self, msg):
-        if not NO_SKIP:
-            raise SkipTest(msg)
-
-    def __filter_errors(self, errors):
-        """Filter errors, removing those that match self.ignore_log_patterns"""
-        if not hasattr(self, 'ignore_log_patterns'):
-            self.ignore_log_patterns = []
-        for e in errors:
-            for pattern in self.ignore_log_patterns:
-                if re.search(pattern, e):
-                    break
-            else:
-                yield e
-
-    # Disable docstrings printing in nosetest output
-    def shortDescription(self):
-        return None
-
-    def get_jfr_jvm_args(self):
-        """
-        @return The JVM arguments required for attaching flight recorder to a Java process.
-        """
-        return ["-XX:+UnlockCommercialFeatures", "-XX:+FlightRecorder"]
-
-    def start_jfr_recording(self, nodes):
-        """
-        Start Java flight recorder provided the cluster was started with the correct jvm arguments.
-        """
-        for node in nodes:
-            p = subprocess.Popen(['jcmd', str(node.pid), 'JFR.start'],
-                                 stdout=subprocess.PIPE,
-                                 stderr=subprocess.PIPE)
-            stdout, stderr = p.communicate()
-            debug(stdout)
-            debug(stderr)
-
-    def dump_jfr_recording(self, nodes):
-        """
-        Save Java flight recorder results to file for analyzing with mission control.
-        """
-        for node in nodes:
-            p = subprocess.Popen(['jcmd', str(node.pid), 'JFR.dump',
-                                  'recording=1', 'filename=recording_{}.jfr'.format(node.address())],
-                                 stdout=subprocess.PIPE,
-                                 stderr=subprocess.PIPE)
-            stdout, stderr = p.communicate()
-            debug(stdout)
-            debug(stderr)
-
-
 def get_eager_protocol_version(cassandra_version):
     """
     Returns the highest protocol version accepted
@@ -690,7 +298,7 @@ def create_cf(session, name, key_type="varchar", speculative_retry=None, read_re
 
     additional_columns = ""
     if columns is not None:
-        for k, v in columns.items():
+        for k, v in list(columns.items()):
             additional_columns = "{}, {} {}".format(additional_columns, k, v)
 
     if additional_columns == "":
@@ -714,20 +322,42 @@ def create_cf(session, name, key_type="varchar", speculative_retry=None, read_re
     if compact_storage:
         query += ' AND COMPACT STORAGE'
 
-    session.execute(query)
-    time.sleep(0.2)
-
+    try:
+        retry_till_success(session.execute, query=query, timeout=120, bypassed_exception=cassandra.OperationTimedOut)
+    except cassandra.AlreadyExists:
+        logger.warn('AlreadyExists executing create cf query \'%s\'' % query)
+    session.cluster.control_connection.wait_for_schema_agreement(wait_time=120)
+    # Going to ignore OperationTimedOut from create CF, so need to validate it was indeed created
+    session.execute('SELECT * FROM %s LIMIT 1' % name)
+
+def create_cf_simple(session, name, query):
+    try:
+        retry_till_success(session.execute, query=query, timeout=120, bypassed_exception=cassandra.OperationTimedOut)
+    except cassandra.AlreadyExists:
+        logger.warn('AlreadyExists executing create cf query \'%s\'' % query)
+    session.cluster.control_connection.wait_for_schema_agreement(wait_time=120)
+    # Going to ignore OperationTimedOut from create CF, so need to validate it was indeed created
+    session.execute('SELECT * FROM %s LIMIT 1' % name)
 
 def create_ks(session, name, rf):
     query = 'CREATE KEYSPACE %s WITH replication={%s}'
-    if isinstance(rf, types.IntType):
+    if isinstance(rf, int):
         # we assume simpleStrategy
-        session.execute(query % (name, "'class':'SimpleStrategy', 'replication_factor':%d" % rf))
+        query = query % (name, "'class':'SimpleStrategy', 'replication_factor':%d" % rf)
     else:
-        assert_greater_equal(len(rf), 0, "At least one datacenter/rf pair is needed")
+        assert len(rf) >= 1, "At least one datacenter/rf pair is needed"
         # we assume networkTopologyStrategy
-        options = (', ').join(['\'%s\':%d' % (d, r) for d, r in rf.iteritems()])
-        session.execute(query % (name, "'class':'NetworkTopologyStrategy', %s" % options))
+        options = (', ').join(['\'%s\':%d' % (d, r) for d, r in rf.items()])
+        query = query % (name, "'class':'NetworkTopologyStrategy', %s" % options)
+
+    try:
+        retry_till_success(session.execute, query=query, timeout=120, bypassed_exception=cassandra.OperationTimedOut)
+    except cassandra.AlreadyExists:
+        logger.warn('AlreadyExists executing create ks query \'%s\'' % query)
+
+    session.cluster.control_connection.wait_for_schema_agreement(wait_time=120)
+    # Also validates it was indeed created even though we ignored OperationTimedOut
+    # Might happen some of the time because CircleCI disk IO is unreliable and hangs randomly
     session.execute('USE {}'.format(name))
 
 
@@ -774,301 +404,13 @@ def kill_windows_cassandra_procs():
                     pass
                 else:
                     if (pinfo['name'] == 'java.exe' and '-Dcassandra' in pinfo['cmdline']):
-                        print 'Found running cassandra process with pid: ' + str(pinfo['pid']) + '. Killing.'
+                        print('Found running cassandra process with pid: ' + str(pinfo['pid']) + '. Killing.')
                         psutil.Process(pinfo['pid']).kill()
         except ImportError:
-            debug("WARN: psutil not installed. Cannot detect and kill "
+            logger.debug("WARN: psutil not installed. Cannot detect and kill "
                   "running cassandra processes - you may see cascading dtest failures.")
 
 
-def get_test_path():
-    test_path = tempfile.mkdtemp(prefix='dtest-')
-
-    # ccm on cygwin needs absolute path to directory - it crosses from cygwin space into
-    # regular Windows space on wmic calls which will otherwise break pathing
-    if sys.platform == "cygwin":
-        process = subprocess.Popen(["cygpath", "-m", test_path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-        test_path = process.communicate()[0].rstrip()
-
-    return test_path
-
-
-# nose will discover this as a test, so we manually make it not a test
-get_test_path.__test__ = False
-
-
-def create_ccm_cluster(test_path, name):
-    debug("cluster ccm directory: " + test_path)
-    version = os.environ.get('CASSANDRA_VERSION')
-    cdir = CASSANDRA_DIR
-
-    if version:
-        cluster = Cluster(test_path, name, cassandra_version=version)
-    else:
-        cluster = Cluster(test_path, name, cassandra_dir=cdir)
-
-    if DISABLE_VNODES:
-        cluster.set_configuration_options(values={'num_tokens': None})
-    else:
-        cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': NUM_TOKENS})
-
-    if OFFHEAP_MEMTABLES:
-        cluster.set_configuration_options(values={'memtable_allocation_type': 'offheap_objects'})
-
-    cluster.set_datadir_count(DATADIR_COUNT)
-    cluster.set_environment_variable('CASSANDRA_LIBJEMALLOC', CASSANDRA_LIBJEMALLOC)
-
-    return cluster
-
-
-def cleanup_cluster(cluster, test_path, log_watch_thread=None):
-    with log_filter('cassandra'):  # quiet noise from driver when nodes start going down
-        if KEEP_TEST_DIR:
-            cluster.stop(gently=RECORD_COVERAGE)
-        else:
-            # when recording coverage the jvm has to exit normally
-            # or the coverage information is not written by the jacoco agent
-            # otherwise we can just kill the process
-            if RECORD_COVERAGE:
-                cluster.stop(gently=True)
-
-            # Cleanup everything:
-            try:
-                if log_watch_thread:
-                    stop_active_log_watch(log_watch_thread)
-            finally:
-                debug("removing ccm cluster {name} at: {path}".format(name=cluster.name, path=test_path))
-                cluster.remove()
-
-                debug("clearing ssl stores from [{0}] directory".format(test_path))
-                for filename in ('keystore.jks', 'truststore.jks', 'ccm_node.cer'):
-                    try:
-                        os.remove(os.path.join(test_path, filename))
-                    except OSError as e:
-                        # once we port to py3, which has better reporting for exceptions raised while
-                        # handling other excpetions, we should just assert e.errno == errno.ENOENT
-                        if e.errno != errno.ENOENT:  # ENOENT = no such file or directory
-                            raise
-
-                os.rmdir(test_path)
-                cleanup_last_test_dir()
-
-
-def cleanup_last_test_dir():
-    if os.path.exists(LAST_TEST_DIR):
-        os.remove(LAST_TEST_DIR)
-
-
-def stop_active_log_watch(log_watch_thread):
-    """
-    Joins the log watching thread, which will then exit.
-    Should be called after each test, ideally after nodes are stopped but before cluster files are removed.
-
-    Can be called multiple times without error.
-    If not called, log watching thread will remain running until the parent process exits.
-    """
-    log_watch_thread.join(timeout=60)
-
-
-def maybe_cleanup_cluster_from_last_test_file():
-    # cleaning up if a previous execution didn't trigger tearDown (which
-    # can happen if it is interrupted by KeyboardInterrupt)
-    if os.path.exists(LAST_TEST_DIR):
-        with open(LAST_TEST_DIR) as f:
-            test_path = f.readline().strip('\n')
-            name = f.readline()
-        try:
-            cluster = ClusterFactory.load(test_path, name)
-            # Avoid waiting too long for node to be marked down
-            cleanup_cluster(cluster, test_path)
-        except IOError:
-            # after a restart, /tmp will be emptied so we'll get an IOError when loading the old cluster here
-            pass
-
-
-def init_default_config(cluster, cluster_options):
-    # the failure detector can be quite slow in such tests with quick start/stop
-    phi_values = {'phi_convict_threshold': 5}
-
-    timeout = 10000
-    if cluster_options is not None:
-        values = merge_dicts(cluster_options, phi_values)
-    else:
-        values = merge_dicts(phi_values, {
-            'read_request_timeout_in_ms': timeout,
-            'range_request_timeout_in_ms': timeout,
-            'write_request_timeout_in_ms': timeout,
-            'truncate_request_timeout_in_ms': timeout,
-            'request_timeout_in_ms': timeout
-        })
-
-    # No more thrift in 4.0, and start_rpc doesn't exists anymore
-    if cluster.version() >= '4' and 'start_rpc' in values:
-        del values['start_rpc']
-
-    cluster.set_configuration_options(values)
-    debug("Done setting configuration options:\n" + pprint.pformat(cluster._config_options, indent=4))
-
-
-def write_last_test_file(test_path, cluster):
-    with open(LAST_TEST_DIR, 'w') as f:
-        f.write(test_path + '\n')
-        f.write(cluster.name)
-
-
-def set_log_levels(cluster):
-    if DEBUG:
-        cluster.set_log_level("DEBUG")
-    if TRACE:
-        cluster.set_log_level("TRACE")
-
-    if os.environ.get('DEBUG', 'no').lower() not in ('no', 'false', 'yes', 'true'):
-        classes_to_debug = os.environ.get('DEBUG').split(":")
-        cluster.set_log_level('DEBUG', None if len(classes_to_debug) == 0 else classes_to_debug)
-
-    if os.environ.get('TRACE', 'no').lower() not in ('no', 'false', 'yes', 'true'):
-        classes_to_trace = os.environ.get('TRACE').split(":")
-        cluster.set_log_level('TRACE', None if len(classes_to_trace) == 0 else classes_to_trace)
-
-
-def maybe_setup_jacoco(test_path, cluster_name='test'):
-    """Setup JaCoCo code coverage support"""
-
-    if not RECORD_COVERAGE:
-        return
-
-    # use explicit agent and execfile locations
-    # or look for a cassandra build if they are not specified
-    cdir = CASSANDRA_DIR
-
-    agent_location = os.environ.get('JACOCO_AGENT_JAR', os.path.join(cdir, 'build/lib/jars/jacocoagent.jar'))
-    jacoco_execfile = os.environ.get('JACOCO_EXECFILE', os.path.join(cdir, 'build/jacoco/jacoco.exec'))
-
-    if os.path.isfile(agent_location):
-        debug("Jacoco agent found at {}".format(agent_location))
-        with open(os.path.join(
-                test_path, cluster_name, 'cassandra.in.sh'), 'w') as f:
-
-            f.write('JVM_OPTS="$JVM_OPTS -javaagent:{jar_path}=destfile={exec_file}"'
-                    .format(jar_path=agent_location, exec_file=jacoco_execfile))
-
-            if os.path.isfile(jacoco_execfile):
-                debug("Jacoco execfile found at {}, execution data will be appended".format(jacoco_execfile))
-            else:
-                debug("Jacoco execfile will be created at {}".format(jacoco_execfile))
-    else:
-        debug("Jacoco agent not found or is not file. Execution will not be recorded.")
-
-
-def did_fail():
-    if sys.exc_info() == (None, None, None):
-        return False
-
-    exc_class, _, _ = sys.exc_info()
-    return not issubclass(exc_class, unittest.case.SkipTest)
-
-
-class ReusableClusterTester(Tester):
-    """
-    A Tester designed for reusing the same cluster across multiple
-    test methods.  This makes test suites with many small tests run
-    much, much faster.  However, there are a couple of downsides:
-
-    First, test setup and teardown must be diligent about cleaning
-    up any data or schema elements that may interfere with other
-    tests.
-
-    Second, errors triggered by one test method may cascade
-    into other test failures.  In an attempt to limit this, the
-    cluster will be restarted if a test fails or an exception is
-    caught.  However, there may still be undetected problems in
-    Cassandra that cause cascading failures.
-    """
-
-    test_path = None
-    cluster = None
-    cluster_options = None
-
-    @classmethod
-    def setUpClass(cls):
-        kill_windows_cassandra_procs()
-        maybe_cleanup_cluster_from_last_test_file()
-        cls.initialize_cluster()
-
-    def setUp(self):
-        self.set_current_tst_name()
-        self.connections = []
-
-        # TODO enable active log watching
-        # This needs to happen in setUp() and not setUpClass() so that individual
-        # test methods can set allow_log_errors and so that error handling
-        # only fails a single test method instead of the entire class.
-        # The problem with this is that ccm doesn't yet support stopping the
-        # active log watcher -- it runs until the cluster is destroyed.  Since
-        # we reuse the same cluster, this doesn't work for us.
-
-    def tearDown(self):
-        # test_is_ending prevents active log watching from being able to interrupt the test
-        self.test_is_ending = True
-
-        failed = did_fail()
-        try:
-            if not self.allow_log_errors and self.check_logs_for_errors():
-                failed = True
-                raise AssertionError('Unexpected error in log, see stdout')
-        finally:
-            try:
-                # save the logs for inspection
-                if failed or KEEP_LOGS:
-                    self.copy_logs(self.cluster)
-            except Exception as e:
-                print "Error saving log:", str(e)
-            finally:
-                reset_environment_vars()
-                if failed:
-                    cleanup_cluster(self.cluster, self.test_path)
-                    kill_windows_cassandra_procs()
-                    self.initialize_cluster()
-
-    @classmethod
-    def initialize_cluster(cls):
-        """
-        This method is responsible for initializing and configuring a ccm
-        cluster for the next set of tests.  This can be called for two
-        different reasons:
-         * A class of tests is starting
-         * A test method failed/errored, so the cluster has been wiped
-
-        Subclasses that require custom initialization should generally
-        do so by overriding post_initialize_cluster().
-        """
-        cls.test_path = get_test_path()
-        cls.cluster = create_ccm_cluster(cls.test_path, name='test')
-        cls.init_config()
-
-        maybe_setup_jacoco(cls.test_path)
-        cls.init_config()
-        write_last_test_file(cls.test_path, cls.cluster)
-        set_log_levels(cls.cluster)
-
-        cls.post_initialize_cluster()
-
-    @classmethod
-    def post_initialize_cluster(cls):
-        """
-        This method is called after the ccm cluster has been created
-        and default config options have been applied.  Any custom
-        initialization for a test class should generally be done
-        here in order to correctly handle cluster restarts after
-        test method failures.
-        """
-        pass
-
-    @classmethod
-    def init_config(cls):
-        init_default_config(cls.cluster, cls.cluster_options)
-
-
 class MultiError(Exception):
     """
     Extends Exception to provide reporting multiple exceptions at once.
@@ -1109,24 +451,20 @@ def run_scenarios(scenarios, handler, deferred_exceptions=tuple()):
     tracebacks = []
 
     for i, scenario in enumerate(scenarios, 1):
-        debug("running scenario {}/{}: {}".format(i, len(scenarios), scenario))
+        logger.debug("running scenario {}/{}: {}".format(i, len(scenarios), scenario))
 
         try:
             handler(scenario)
         except deferred_exceptions as e:
             tracebacks.append(traceback.format_exc())
-            errors.append(type(e)('encountered {} {} running scenario:\n  {}\n'.format(e.__class__.__name__, e.message, scenario)))
-            debug("scenario {}/{} encountered a deferrable exception, continuing".format(i, len(scenarios)))
+            errors.append(type(e)('encountered {} {} running scenario:\n  {}\n'.format(e.__class__.__name__, str(e), scenario)))
+            logger.debug("scenario {}/{} encountered a deferrable exception, continuing".format(i, len(scenarios)))
         except Exception as e:
             # catch-all for any exceptions not intended to be deferred
             tracebacks.append(traceback.format_exc())
-            errors.append(type(e)('encountered {} {} running scenario:\n  {}\n'.format(e.__class__.__name__, e.message, scenario)))
-            debug("scenario {}/{} encountered a non-deferrable exception, aborting".format(i, len(scenarios)))
+            errors.append(type(e)('encountered {} {} running scenario:\n  {}\n'.format(e.__class__.__name__, str(e), scenario)))
+            logger.debug("scenario {}/{} encountered a non-deferrable exception, aborting".format(i, len(scenarios)))
             raise MultiError(errors, tracebacks)
 
     if errors:
         raise MultiError(errors, tracebacks)
-
-
-def supports_v5_protocol(cluster_version):
-    return cluster_version >= LooseVersion('4.0')
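
The schema-change helpers above now wrap every DDL statement in a retry loop and then verify the result, instead of sleeping and hoping. A minimal standalone sketch of that pattern (retry_till_success mirrors the helper this commit adds in dtest_setup.py; the flaky callable is hypothetical, for illustration only):

    import time

    def retry_till_success(fun, *args, **kwargs):
        # Keep calling fun until it succeeds; re-raise the bypassed
        # exception only once the timeout budget is exhausted.
        timeout = kwargs.pop('timeout', 60)
        bypassed_exception = kwargs.pop('bypassed_exception', Exception)
        deadline = time.time() + timeout
        while True:
            try:
                return fun(*args, **kwargs)
            except bypassed_exception:
                if time.time() > deadline:
                    raise
                time.sleep(0.25)  # brief pause before the next attempt

    attempts = []

    def flaky_ddl():
        # Hypothetical stand-in for session.execute timing out twice.
        attempts.append(1)
        if len(attempts) < 3:
            raise TimeoutError('simulated OperationTimedOut')
        return 'schema changed'

    assert retry_till_success(flaky_ddl, timeout=5,
                              bypassed_exception=TimeoutError) == 'schema changed'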

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/dtest_setup.py
----------------------------------------------------------------------
diff --git a/dtest_setup.py b/dtest_setup.py
new file mode 100644
index 0000000..87014f4
--- /dev/null
+++ b/dtest_setup.py
@@ -0,0 +1,498 @@
+import pytest
+import glob
+import os
+import shutil
+import time
+import logging
+import re
+import tempfile
+import subprocess
+import sys
+import errno
+import pprint
+from collections import OrderedDict
+
+from cassandra.cluster import Cluster as PyCluster
+from cassandra.cluster import NoHostAvailable
+from cassandra.cluster import EXEC_PROFILE_DEFAULT
+from cassandra.policies import WhiteListRoundRobinPolicy
+from ccmlib.common import get_version_from_build, is_win
+from ccmlib.cluster import Cluster
+
+from dtest import (get_ip_from_node, make_execution_profile, get_auth_provider, get_port_from_node,
+                   get_eager_protocol_version)
+from distutils.version import LooseVersion
+
+from tools.context import log_filter
+from tools.funcutils import merge_dicts
+
+logger = logging.getLogger(__name__)
+
+
+def retry_till_success(fun, *args, **kwargs):
+    timeout = kwargs.pop('timeout', 60)
+    bypassed_exception = kwargs.pop('bypassed_exception', Exception)
+
+    deadline = time.time() + timeout
+    while True:
+        try:
+            return fun(*args, **kwargs)
+        except bypassed_exception:
+            if time.time() > deadline:
+                raise
+            else:
+                # brief pause before next attempt
+                time.sleep(0.25)
+
+
+class DTestSetup:
+    def __init__(self, dtest_config=None, setup_overrides=None):
+        self.dtest_config = dtest_config
+        self.setup_overrides = setup_overrides
+        self.ignore_log_patterns = []
+        self.cluster = None
+        self.cluster_options = []
+        self.replacement_node = None
+        self.allow_log_errors = False
+        self.connections = []
+
+        self.log_saved_dir = "logs"
+        try:
+            os.mkdir(self.log_saved_dir)
+        except OSError:
+            pass
+
+        self.last_log = os.path.join(self.log_saved_dir, "last")
+        self.test_path = self.get_test_path()
+        self.enable_for_jolokia = False
+        self.subprocs = []
+        self.log_watch_thread = None
+        self.last_test_dir = "last_test_dir"
+        self.jvm_args = []
+
+    def get_test_path(self):
+        test_path = tempfile.mkdtemp(prefix='dtest-')
+
+        # ccm on cygwin needs absolute path to directory - it crosses from cygwin space into
+        # regular Windows space on wmic calls which will otherwise break pathing
+        if sys.platform == "cygwin":
+            process = subprocess.Popen(["cygpath", "-m", test_path], stdout=subprocess.PIPE,
+                                       stderr=subprocess.STDOUT)
+            test_path = process.communicate()[0].rstrip()
+
+        return test_path
+
+    def glob_data_dirs(self, path, ks="ks"):
+        result = []
+        for node in self.cluster.nodelist():
+            for data_dir in node.data_directories():
+                ks_dir = os.path.join(data_dir, ks, path)
+                result.extend(glob.glob(ks_dir))
+        return result
+
+    def begin_active_log_watch(self):
+        """
+        Calls into ccm to start actively watching logs.
+
+        In the event that errors are seen in logs, ccm will call back to _log_error_handler.
+
+        When the cluster is no longer in use, stop_active_log_watch should be called to end log watching.
+        (otherwise a 'daemon' thread will (needlessly) run until the process exits).
+        """
+        self._log_watch_thread = self.cluster.actively_watch_logs_for_error(self._log_error_handler, interval=0.25)
+
+    def _log_error_handler(self, errordata):
+        """
+        Callback handler used in conjunction with begin_active_log_watch.
+        When called, prepares exception instance, we will use pytest.fail
+        to kill the current test being executed and mark it as failed
+
+        @param errordata is a dictionary mapping node name to failure list.
+        """
+        # in some cases self.allow_log_errors may get set after proactive log checking has been enabled
+        # so we need to double-check first thing before proceeding
+        if self.allow_log_errors:
+            return
+
+        reportable_errordata = OrderedDict()
+
+        for nodename, errors in list(errordata.items()):
+            filtered_errors = list(self.__filter_errors(['\n'.join(msg) for msg in errors]))
+            if len(filtered_errors) != 0:
+                reportable_errordata[nodename] = filtered_errors
+
+        # no errors worthy of halting the test
+        if not reportable_errordata:
+            return
+
+        message = "Errors seen in logs for: {nodes}".format(nodes=", ".join(list(reportable_errordata.keys())))
+        for nodename, errors in list(reportable_errordata.items()):
+            for error in errors:
+                message += "\n{nodename}: {error}".format(nodename=nodename, error=error)
+
+        logger.debug('Errors were just seen in logs, ending test (if not ending already)!')
+        pytest.fail("Error details: \n{message}".format(message=message))
+
+    def copy_logs(self, directory=None, name=None):
+        """Copy the current cluster's log files somewhere, by default to LOG_SAVED_DIR with a name of 'last'"""
+        if directory is None:
+            directory = self.log_saved_dir
+        if name is None:
+            name = self.last_log
+        else:
+            name = os.path.join(directory, name)
+        if not os.path.exists(directory):
+            os.mkdir(directory)
+        logs = [(node.name, node.logfilename(), node.debuglogfilename(), node.gclogfilename(),
+                 node.compactionlogfilename())
+                for node in list(self.cluster.nodes.values())]
+        if len(logs) != 0:
+            basedir = str(int(time.time() * 1000)) + '_' + str(id(self))
+            logdir = os.path.join(directory, basedir)
+            os.mkdir(logdir)
+            for n, log, debuglog, gclog, compactionlog in logs:
+                if os.path.exists(log):
+                    assert os.path.getsize(log) >= 0
+                    shutil.copyfile(log, os.path.join(logdir, n + ".log"))
+                if os.path.exists(debuglog):
+                    assert os.path.getsize(debuglog) >= 0
+                    shutil.copyfile(debuglog, os.path.join(logdir, n + "_debug.log"))
+                if os.path.exists(gclog):
+                    assert os.path.getsize(gclog) >= 0
+                    shutil.copyfile(gclog, os.path.join(logdir, n + "_gc.log"))
+                if os.path.exists(compactionlog):
+                    assert os.path.getsize(compactionlog) >= 0
+                    shutil.copyfile(compactionlog, os.path.join(logdir, n + "_compaction.log"))
+            if os.path.exists(name):
+                os.unlink(name)
+            if not is_win():
+                os.symlink(basedir, name)
+
+    def cql_connection(self, node, keyspace=None, user=None,
+                       password=None, compression=True, protocol_version=None, port=None, ssl_opts=None, **kwargs):
+
+        return self._create_session(node, keyspace, user, password, compression,
+                                    protocol_version, port=port, ssl_opts=ssl_opts, **kwargs)
+
+    def exclusive_cql_connection(self, node, keyspace=None, user=None,
+                                 password=None, compression=True, protocol_version=None, port=None, ssl_opts=None,
+                                 **kwargs):
+
+        node_ip = get_ip_from_node(node)
+        wlrr = WhiteListRoundRobinPolicy([node_ip])
+
+        return self._create_session(node, keyspace, user, password, compression,
+                                    protocol_version, port=port, ssl_opts=ssl_opts, load_balancing_policy=wlrr,
+                                    **kwargs)
+
+    def _create_session(self, node, keyspace, user, password, compression, protocol_version,
+                        port=None, ssl_opts=None, execution_profiles=None, **kwargs):
+        node_ip = get_ip_from_node(node)
+        if not port:
+            port = get_port_from_node(node)
+
+        if protocol_version is None:
+            protocol_version = get_eager_protocol_version(node.cluster.version())
+
+        if user is not None:
+            auth_provider = get_auth_provider(user=user, password=password)
+        else:
+            auth_provider = None
+
+        profiles = {EXEC_PROFILE_DEFAULT: make_execution_profile(**kwargs)
+                    } if not execution_profiles else execution_profiles
+
+        cluster = PyCluster([node_ip],
+                            auth_provider=auth_provider,
+                            compression=compression,
+                            protocol_version=protocol_version,
+                            port=port,
+                            ssl_options=ssl_opts,
+                            connect_timeout=15,
+                            allow_beta_protocol_version=True,
+                            execution_profiles=profiles)
+        session = cluster.connect(wait_for_all_pools=True)
+
+        if keyspace is not None:
+            session.set_keyspace(keyspace)
+
+        self.connections.append(session)
+        return session
+
+    def patient_cql_connection(self, node, keyspace=None,
+                               user=None, password=None, timeout=30, compression=True,
+                               protocol_version=None, port=None, ssl_opts=None, **kwargs):
+        """
+        Returns a connection after it stops throwing NoHostAvailables due to not being ready.
+
+        If the timeout is exceeded, the exception is raised.
+        """
+        if is_win():
+            timeout *= 2
+
+        expected_log_lines = ('Control connection failed to connect, shutting down Cluster:',
+                              '[control connection] Error connecting to ')
+        with log_filter('cassandra.cluster', expected_log_lines):
+            session = retry_till_success(
+                self.cql_connection,
+                node,
+                keyspace=keyspace,
+                user=user,
+                password=password,
+                timeout=timeout,
+                compression=compression,
+                protocol_version=protocol_version,
+                port=port,
+                ssl_opts=ssl_opts,
+                bypassed_exception=NoHostAvailable,
+                **kwargs
+            )
+
+        return session
+
+    def patient_exclusive_cql_connection(self, node, keyspace=None,
+                                         user=None, password=None, timeout=30, compression=True,
+                                         protocol_version=None, port=None, ssl_opts=None, **kwargs):
+        """
+        Returns a connection after it stops throwing NoHostAvailables due to not being ready.
+
+        If the timeout is exceeded, the exception is raised.
+        """
+        if is_win():
+            timeout *= 2
+
+        return retry_till_success(
+            self.exclusive_cql_connection,
+            node,
+            keyspace=keyspace,
+            user=user,
+            password=password,
+            timeout=timeout,
+            compression=compression,
+            protocol_version=protocol_version,
+            port=port,
+            ssl_opts=ssl_opts,
+            bypassed_exception=NoHostAvailable,
+            **kwargs
+        )
+
+    def check_logs_for_errors(self):
+        for node in self.cluster.nodelist():
+            errors = list(self.__filter_errors(
+                ['\n'.join(msg) for msg in node.grep_log_for_errors()]))
+            if len(errors) != 0:
+                for error in errors:
+                    print("Unexpected error in {node_name} log, error: \n{error}".format(node_name=node.name, error=error))
+                return True
+
+    def __filter_errors(self, errors):
+        """Filter errors, removing those that match self.ignore_log_patterns"""
+        if not hasattr(self, 'ignore_log_patterns'):
+            self.ignore_log_patterns = []
+        for e in errors:
+            for pattern in self.ignore_log_patterns:
+                if re.search(pattern, e):
+                    break
+            else:
+                yield e
+
+    def get_jfr_jvm_args(self):
+        """
+        @return The JVM arguments required for attaching flight recorder to a Java process.
+        """
+        return ["-XX:+UnlockCommercialFeatures", "-XX:+FlightRecorder"]
+
+    def start_jfr_recording(self, nodes):
+        """
+        Start Java flight recorder provided the cluster was started with the correct jvm arguments.
+        """
+        for node in nodes:
+            p = subprocess.Popen(['jcmd', str(node.pid), 'JFR.start'],
+                                 stdout=subprocess.PIPE,
+                                 stderr=subprocess.PIPE)
+            stdout, stderr = p.communicate()
+            logger.debug(stdout)
+            logger.debug(stderr)
+
+    def dump_jfr_recording(self, nodes):
+        """
+        Save Java flight recorder results to file for analyzing with mission control.
+        """
+        for node in nodes:
+            p = subprocess.Popen(['jcmd', str(node.pid), 'JFR.dump',
+                                  'recording=1', 'filename=recording_{}.jfr'.format(node.address())],
+                                 stdout=subprocess.PIPE,
+                                 stderr=subprocess.PIPE)
+            stdout, stderr = p.communicate()
+            logger.debug(stdout)
+            logger.debug(stderr)
+
+    def supports_v5_protocol(self, cluster_version):
+        return cluster_version >= LooseVersion('4.0')
+
+    def cleanup_last_test_dir(self):
+        if os.path.exists(self.last_test_dir):
+            os.remove(self.last_test_dir)
+
+    def stop_active_log_watch(self):
+        """
+        Joins the log watching thread, which will then exit.
+        Should be called after each test, ideally after nodes are stopped but before cluster files are removed.
+
+        Can be called multiple times without error.
+        If not called, log watching thread will remain running until the parent process exits.
+        """
+        self.log_watch_thread.join(timeout=60)
+
+    def cleanup_cluster(self):
+        with log_filter('cassandra'):  # quiet noise from driver when nodes start going down
+            if self.dtest_config.keep_test_dir:
+                self.cluster.stop(gently=self.dtest_config.enable_jacoco_code_coverage)
+            else:
+                # when recording coverage the jvm has to exit normally
+                # or the coverage information is not written by the jacoco agent
+                # otherwise we can just kill the process
+                if self.dtest_config.enable_jacoco_code_coverage:
+                    self.cluster.stop(gently=True)
+
+                # Cleanup everything:
+                try:
+                    if self.log_watch_thread:
+                        self.stop_active_log_watch()
+                finally:
+                    logger.debug("removing ccm cluster {name} at: {path}".format(name=self.cluster.name,
+                                                                          path=self.test_path))
+                    self.cluster.remove()
+
+                    logger.debug("clearing ssl stores from [{0}] directory".format(self.test_path))
+                    for filename in ('keystore.jks', 'truststore.jks', 'ccm_node.cer'):
+                        try:
+                            os.remove(os.path.join(self.test_path, filename))
+                        except OSError as e:
+                            # ENOENT = no such file or directory
+                            assert e.errno == errno.ENOENT
+
+                    os.rmdir(self.test_path)
+                    self.cleanup_last_test_dir()
+
+    def cleanup_and_replace_cluster(self):
+        for con in self.connections:
+            con.cluster.shutdown()
+        self.connections = []
+
+        self.cleanup_cluster()
+        self.test_path = self.get_test_path()
+        self.initialize_cluster()
+
+    def init_default_config(self):
+        # the failure detector can be quite slow in such tests with quick start/stop
+        phi_values = {'phi_convict_threshold': 5}
+
+        timeout = 15000
+        if self.cluster_options is not None and len(self.cluster_options) > 0:
+            values = merge_dicts(self.cluster_options, phi_values)
+        else:
+            values = merge_dicts(phi_values, {
+                'read_request_timeout_in_ms': timeout,
+                'range_request_timeout_in_ms': timeout,
+                'write_request_timeout_in_ms': timeout,
+                'truncate_request_timeout_in_ms': timeout,
+                'request_timeout_in_ms': timeout
+            })
+
+        if self.setup_overrides is not None and len(self.setup_overrides.cluster_options) > 0:
+            values = merge_dicts(values, self.setup_overrides.cluster_options)
+
+        # No more thrift in 4.0, and start_rpc doesn't exist anymore
+        if self.cluster.version() >= '4' and 'start_rpc' in values:
+            del values['start_rpc']
+
+        self.cluster.set_configuration_options(values)
+        logger.debug("Done setting configuration options:\n" + pprint.pformat(self.cluster._config_options, indent=4))
+
+    def maybe_setup_jacoco(self, cluster_name='test'):
+        """Setup JaCoCo code coverage support"""
+
+        if not self.dtest_config.enable_jacoco_code_coverage:
+            return
+
+        # use explicit agent and execfile locations
+        # or look for a cassandra build if they are not specified
+        agent_location = os.environ.get('JACOCO_AGENT_JAR',
+                                        os.path.join(self.dtest_config.cassandra_dir, 'build/lib/jars/jacocoagent.jar'))
+        jacoco_execfile = os.environ.get('JACOCO_EXECFILE',
+                                         os.path.join(self.dtest_config.cassandra_dir, 'build/jacoco/jacoco.exec'))
+
+        if os.path.isfile(agent_location):
+            logger.debug("Jacoco agent found at {}".format(agent_location))
+            with open(os.path.join(
+                    self.test_path, cluster_name, 'cassandra.in.sh'), 'w') as f:
+
+                f.write('JVM_OPTS="$JVM_OPTS -javaagent:{jar_path}=destfile={exec_file}"'
+                        .format(jar_path=agent_location, exec_file=jacoco_execfile))
+
+                if os.path.isfile(jacoco_execfile):
+                    logger.debug("Jacoco execfile found at {}, execution data will be appended".format(jacoco_execfile))
+                else:
+                    logger.debug("Jacoco execfile will be created at {}".format(jacoco_execfile))
+        else:
+            logger.debug("Jacoco agent not found or is not file. Execution will not be recorded.")
+
+    def create_ccm_cluster(self, name):
+        logger.debug("cluster ccm directory: " + self.test_path)
+        version = self.dtest_config.cassandra_version
+
+        if version:
+            cluster = Cluster(self.test_path, name, cassandra_version=version)
+        else:
+            cluster = Cluster(self.test_path, name, cassandra_dir=self.dtest_config.cassandra_dir)
+
+        if self.dtest_config.use_vnodes:
+            cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': self.dtest_config.num_tokens})
+        else:
+            cluster.set_configuration_options(values={'num_tokens': None})
+
+        if self.dtest_config.use_off_heap_memtables:
+            cluster.set_configuration_options(values={'memtable_allocation_type': 'offheap_objects'})
+
+        cluster.set_datadir_count(self.dtest_config.data_dir_count)
+        cluster.set_environment_variable('CASSANDRA_LIBJEMALLOC', self.dtest_config.jemalloc_path)
+
+        return cluster
+
+    def set_cluster_log_levels(self):
+        """
+        The root logger gets configured in the fixture named fixture_logging_setup.
+        Based on the logging configuration options the user invoked pytest with,
+        that fixture sets the root logger to that configuration. We then ensure all
+        Cluster objects we work with "inherit" these logging settings (which we can
+        lookup off the root logger)
+        """
+        if logging.root.level != logging.NOTSET:
+            log_level = logging.getLevelName(logging.root.level)
+        else:
+            log_level = logging.getLevelName(logging.INFO)
+        self.cluster.set_log_level(log_level)
+
+    def initialize_cluster(self):
+        """
+        This method is responsible for initializing and configuring a ccm
+        cluster for the next set of tests.  This can be called for two
+        different reasons:
+         * A class of tests is starting
+         * A test method failed/errored, so the cluster has been wiped
+
+        Subclasses that require custom initialization should generally
+        do so by overriding post_initialize_cluster().
+        """
+        # connections = []
+        # cluster_options = []
+        self.cluster = self.create_ccm_cluster(name='test')
+        self.init_default_config()
+        self.maybe_setup_jacoco()
+        self.set_cluster_log_levels()
+
+        # cls.init_config()
+        # write_last_test_file(cls.test_path, cls.cluster)
+
+        # cls.post_initialize_cluster()
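
The Tester class in dtest.py reaches these DTestSetup methods through __getattribute__ delegation; a self-contained sketch of how that fall-through resolves (Setup here is a stand-in for DTestSetup, and the attribute value is illustrative only):

    class Setup:
        """Stand-in for DTestSetup, holding per-test state."""
        def __init__(self):
            self.cluster = 'ccm-cluster-handle'

    class Tester:
        def __getattribute__(self, name):
            # Try the Tester instance first; on a miss, fall through to
            # the attached setup object, mirroring the new Tester in dtest.py.
            try:
                return object.__getattribute__(self, name)
            except AttributeError:
                setup = object.__getattribute__(self, 'fixture_dtest_setup')
                return object.__getattribute__(setup, name)

    t = Tester()
    t.fixture_dtest_setup = Setup()
    assert t.cluster == 'ccm-cluster-handle'  # resolved via delegation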

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/dtest_setup_overrides.py
----------------------------------------------------------------------
diff --git a/dtest_setup_overrides.py b/dtest_setup_overrides.py
new file mode 100644
index 0000000..6ea3258
--- /dev/null
+++ b/dtest_setup_overrides.py
@@ -0,0 +1,3 @@
+class DTestSetupOverrides:
+    def __init__(self):
+        self.cluster_options = []
\ No newline at end of file
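
DTestSetup.init_default_config merges these per-test overrides on top of its defaults, so the override wins on key collisions. A minimal sketch of that flow (merge_dicts is a simplified stand-in for tools.funcutils.merge_dicts; the option values are illustrative):

    class DTestSetupOverrides:
        def __init__(self):
            self.cluster_options = []

    def merge_dicts(*dicts):
        # Simplified stand-in for tools.funcutils.merge_dicts:
        # later dicts win on key collisions.
        result = {}
        for d in dicts:
            result.update(d)
        return result

    overrides = DTestSetupOverrides()
    overrides.cluster_options = {'hinted_handoff_enabled': False}

    defaults = {'phi_convict_threshold': 5, 'hinted_handoff_enabled': True}
    effective = merge_dicts(defaults, overrides.cluster_options)
    assert effective['hinted_handoff_enabled'] is False  # override wins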

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/global_row_key_cache_test.py
----------------------------------------------------------------------
diff --git a/global_row_key_cache_test.py b/global_row_key_cache_test.py
index c16f793..74c67f4 100644
--- a/global_row_key_cache_test.py
+++ b/global_row_key_cache_test.py
@@ -1,13 +1,16 @@
 import time
+import logging
 
 from cassandra.concurrent import execute_concurrent_with_args
 
-from dtest import Tester, debug, create_ks
+from dtest import Tester, create_ks, create_cf_simple
+
+logger = logging.getLogger(__name__)
 
 
 class TestGlobalRowKeyCache(Tester):
 
-    def functional_test(self):
+    def test_functional(self):
         cluster = self.cluster
         cluster.populate(3)
         node1 = cluster.nodelist()[0]
@@ -15,7 +18,7 @@ class TestGlobalRowKeyCache(Tester):
         for keycache_size in (0, 10):
             for rowcache_size in (0, 10):
                 cluster.stop()
-                debug("Testing with keycache size of %d MB, rowcache size of %d MB " %
+                logger.debug("Testing with keycache size of %d MB, rowcache size of %d MB " %
                       (keycache_size, rowcache_size))
                 keyspace_name = 'ks_%d_%d' % (keycache_size, rowcache_size)
 
@@ -29,14 +32,15 @@ class TestGlobalRowKeyCache(Tester):
 
                 cluster.start()
                 session = self.patient_cql_connection(node1)
-
                 create_ks(session, keyspace_name, rf=3)
 
                 session.set_keyspace(keyspace_name)
-                session.execute("CREATE TABLE test (k int PRIMARY KEY, v1 int, v2 int)")
-                session.execute("CREATE TABLE test_clustering (k int, v1 int, v2 int, PRIMARY KEY (k, v1))")
-                session.execute("CREATE TABLE test_counter (k int PRIMARY KEY, v1 counter)")
-                session.execute("CREATE TABLE test_counter_clustering (k int, v1 int, v2 counter, PRIMARY KEY (k, v1))")
+                create_cf_simple(session, 'test', "CREATE TABLE test (k int PRIMARY KEY, v1 int, v2 int)")
+                create_cf_simple(session, 'test_clustering',
+                                 "CREATE TABLE test_clustering (k int, v1 int, v2 int, PRIMARY KEY (k, v1))")
+                create_cf_simple(session, 'test_counter', "CREATE TABLE test_counter (k int PRIMARY KEY, v1 counter)")
+                create_cf_simple(session, 'test_counter_clustering',
+                                 "CREATE TABLE test_counter_clustering (k int, v1 int, v2 counter, PRIMARY KEY (k, v1))")
 
                 # insert 100 rows into each table
                 for cf in ('test', 'test_clustering'):
@@ -87,12 +91,12 @@ class TestGlobalRowKeyCache(Tester):
                 session.shutdown()
 
                 # let the data be written to the row/key caches.
-                debug("Letting caches be saved to disk")
+                logger.debug("Letting caches be saved to disk")
                 time.sleep(10)
-                debug("Stopping cluster")
+                logger.debug("Stopping cluster")
                 cluster.stop()
                 time.sleep(1)
-                debug("Starting cluster")
+                logger.debug("Starting cluster")
                 cluster.start()
                 time.sleep(5)  # read the data back from row and key caches
 
@@ -108,38 +112,38 @@ class TestGlobalRowKeyCache(Tester):
             rows = list(session.execute("SELECT * FROM %s" % (cf,)))
 
             # one row gets deleted each validation round
-            self.assertEquals(100 - (validation_round + 1), len(rows))
+            assert 100 - (validation_round + 1) == len(rows)
 
             # adjust enumeration start to account for row deletions
             for i, row in enumerate(sorted(rows), start=(validation_round + 1)):
-                self.assertEquals(i, row.k)
-                self.assertEquals(i, row.v1)
+                assert i == row.k
+                assert i == row.v1
 
                 # updated rows will have different values
                 expected_value = validation_round if i < num_updates else i
-                self.assertEquals(expected_value, row.v2)
+                assert expected_value == row.v2
 
         # check values of counter tables
         rows = list(session.execute("SELECT * FROM test_counter"))
-        self.assertEquals(100, len(rows))
+        assert 100 == len(rows)
         for i, row in enumerate(sorted(rows)):
-            self.assertEquals(i, row.k)
+            assert i == row.k
 
             # updated rows will get incremented once each round
             expected_value = i
             if i < num_updates:
                 expected_value += validation_round + 1
 
-            self.assertEquals(expected_value, row.v1)
+            assert expected_value == row.v1
 
         rows = list(session.execute("SELECT * FROM test_counter_clustering"))
-        self.assertEquals(100, len(rows))
+        assert 100 == len(rows)
         for i, row in enumerate(sorted(rows)):
-            self.assertEquals(i, row.k)
-            self.assertEquals(i, row.v1)
+            assert i == row.k
+            assert i == row.v1
 
             expected_value = i
             if i < num_updates:
                 expected_value += validation_round + 1
 
-            self.assertEquals(expected_value, row.v2)
+            assert expected_value == row.v2
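
The assertion changes in this file follow the migration-wide pattern: unittest's self.assertEquals(a, b) becomes a bare assert a == b, which pytest's assertion rewriting reports with both operands on failure. A tiny runnable illustration (the data is made up):

    import re

    def test_assert_styles():
        rows = [(0, 7)]
        expected_value = 7
        # replaces self.assertEquals(expected_value, row.v2)
        assert expected_value == rows[0][1]
        # replaces self.assertRegexpMatches(text, pattern)
        assert re.search(r'is running', 'Hinted handoff is running')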

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/hintedhandoff_test.py
----------------------------------------------------------------------
diff --git a/hintedhandoff_test.py b/hintedhandoff_test.py
index 6345e3c..68d341e 100644
--- a/hintedhandoff_test.py
+++ b/hintedhandoff_test.py
@@ -1,11 +1,15 @@
 import os
 import time
+import pytest
+import logging
 
 from cassandra import ConsistencyLevel
 
-from dtest import DISABLE_VNODES, Tester, create_ks
+from dtest import Tester, create_ks
 from tools.data import create_c1c2_table, insert_c1c2, query_c1c2
-from tools.decorators import no_vnodes, since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 @since('3.0')
@@ -26,7 +30,7 @@ class TestHintedHandoffConfig(Tester):
         if config_options:
             cluster.set_configuration_options(values=config_options)
 
-        if DISABLE_VNODES:
+        if not self.dtest_config.use_vnodes:
             cluster.populate([2]).start()
         else:
             tokens = cluster.balanced_tokens(2)
@@ -39,7 +43,7 @@ class TestHintedHandoffConfig(Tester):
         Launch a nodetool command and check there is no error, return the result
         """
         out, err, _ = node.nodetool(cmd)
-        self.assertEqual('', err)
+        assert '' == err
         return out
 
     def _do_hinted_handoff(self, node1, node2, enabled, keyspace='ks'):
@@ -65,13 +69,13 @@ class TestHintedHandoffConfig(Tester):
 
         # Check node2 for all the keys that should have been delivered via HH if enabled or not if not enabled
         session = self.patient_exclusive_cql_connection(node2, keyspace=keyspace)
-        for n in xrange(0, 100):
+        for n in range(0, 100):
             if enabled:
                 query_c1c2(session, n, ConsistencyLevel.ONE)
             else:
                 query_c1c2(session, n, ConsistencyLevel.ONE, tolerate_missing=True, must_be_missing=True)
 
-    def nodetool_test(self):
+    def test_nodetool(self):
         """
         Test various nodetool commands
         """
@@ -79,25 +83,25 @@ class TestHintedHandoffConfig(Tester):
 
         for node in node1, node2:
             res = self._launch_nodetool_cmd(node, 'statushandoff')
-            self.assertEqual('Hinted handoff is running', res.rstrip())
+            assert 'Hinted handoff is running' == res.rstrip()
 
             self._launch_nodetool_cmd(node, 'disablehandoff')
             res = self._launch_nodetool_cmd(node, 'statushandoff')
-            self.assertEqual('Hinted handoff is not running', res.rstrip())
+            assert 'Hinted handoff is not running' == res.rstrip()
 
             self._launch_nodetool_cmd(node, 'enablehandoff')
             res = self._launch_nodetool_cmd(node, 'statushandoff')
-            self.assertEqual('Hinted handoff is running', res.rstrip())
+            assert 'Hinted handoff is running' == res.rstrip()
 
             self._launch_nodetool_cmd(node, 'disablehintsfordc dc1')
             res = self._launch_nodetool_cmd(node, 'statushandoff')
-            self.assertEqual('Hinted handoff is running{}Data center dc1 is disabled'.format(os.linesep), res.rstrip())
+            assert 'Hinted handoff is running{}Data center dc1 is disabled'.format(os.linesep) == res.rstrip()
 
             self._launch_nodetool_cmd(node, 'enablehintsfordc dc1')
             res = self._launch_nodetool_cmd(node, 'statushandoff')
-            self.assertEqual('Hinted handoff is running', res.rstrip())
+            assert 'Hinted handoff is running' == res.rstrip()
 
-    def hintedhandoff_disabled_test(self):
+    def test_hintedhandoff_disabled(self):
         """
         Test global hinted handoff disabled
         """
@@ -105,11 +109,11 @@ class TestHintedHandoffConfig(Tester):
 
         for node in node1, node2:
             res = self._launch_nodetool_cmd(node, 'statushandoff')
-            self.assertEqual('Hinted handoff is not running', res.rstrip())
+            assert 'Hinted handoff is not running' == res.rstrip()
 
         self._do_hinted_handoff(node1, node2, False)
 
-    def hintedhandoff_enabled_test(self):
+    def test_hintedhandoff_enabled(self):
         """
         Test global hinted handoff enabled
         """
@@ -117,12 +121,12 @@ class TestHintedHandoffConfig(Tester):
 
         for node in node1, node2:
             res = self._launch_nodetool_cmd(node, 'statushandoff')
-            self.assertEqual('Hinted handoff is running', res.rstrip())
+            assert 'Hinted handoff is running' == res.rstrip()
 
         self._do_hinted_handoff(node1, node2, True)
 
     @since('4.0')
-    def hintedhandoff_setmaxwindow_test(self):
+    def test_hintedhandoff_setmaxwindow(self):
         """
         Test global hinted handoff against max_hint_window_in_ms update via nodetool
         """
@@ -130,18 +134,18 @@ class TestHintedHandoffConfig(Tester):
 
         for node in node1, node2:
             res = self._launch_nodetool_cmd(node, 'statushandoff')
-            self.assertEqual('Hinted handoff is running', res.rstrip())
+            assert 'Hinted handoff is running' == res.rstrip()
 
         res = self._launch_nodetool_cmd(node, 'getmaxhintwindow')
-        self.assertEqual('Current max hint window: 300000 ms', res.rstrip())
+        assert 'Current max hint window: 300000 ms' == res.rstrip()
         self._do_hinted_handoff(node1, node2, True)
         node1.start(wait_other_notice=True)
         self._launch_nodetool_cmd(node, 'setmaxhintwindow 1')
         res = self._launch_nodetool_cmd(node, 'getmaxhintwindow')
-        self.assertEqual('Current max hint window: 1 ms', res.rstrip())
+        assert 'Current max hint window: 1 ms' == res.rstrip()
         self._do_hinted_handoff(node1, node2, False, keyspace='ks2')
 
-    def hintedhandoff_dc_disabled_test(self):
+    def test_hintedhandoff_dc_disabled(self):
         """
         Test global hinted handoff enabled with the dc disabled
         """
@@ -150,11 +154,11 @@ class TestHintedHandoffConfig(Tester):
 
         for node in node1, node2:
             res = self._launch_nodetool_cmd(node, 'statushandoff')
-            self.assertEqual('Hinted handoff is running{}Data center dc1 is disabled'.format(os.linesep), res.rstrip())
+            assert 'Hinted handoff is running{}Data center dc1 is disabled'.format(os.linesep) == res.rstrip()
 
         self._do_hinted_handoff(node1, node2, False)
 
-    def hintedhandoff_dc_reenabled_test(self):
+    def test_hintedhandoff_dc_reenabled(self):
         """
         Test global hinted handoff enabled with the dc disabled first and then re-enabled
         """
@@ -163,20 +167,20 @@ class TestHintedHandoffConfig(Tester):
 
         for node in node1, node2:
             res = self._launch_nodetool_cmd(node, 'statushandoff')
-            self.assertEqual('Hinted handoff is running{}Data center dc1 is disabled'.format(os.linesep), res.rstrip())
+            assert 'Hinted handoff is running{}Data center dc1 is disabled'.format(os.linesep) == res.rstrip()
 
         for node in node1, node2:
             self._launch_nodetool_cmd(node, 'enablehintsfordc dc1')
             res = self._launch_nodetool_cmd(node, 'statushandoff')
-            self.assertEqual('Hinted handoff is running', res.rstrip())
+            assert 'Hinted handoff is running' == res.rstrip()
 
         self._do_hinted_handoff(node1, node2, True)
 
 
 class TestHintedHandoff(Tester):
 
-    @no_vnodes()
-    def hintedhandoff_decom_test(self):
+    @pytest.mark.no_vnodes
+    def test_hintedhandoff_decom(self):
         self.cluster.populate(4).start(wait_for_binary_proto=True)
         [node1, node2, node3, node4] = self.cluster.nodelist()
         session = self.patient_cql_connection(node1)
@@ -192,5 +196,5 @@ class TestHintedHandoff(Tester):
         node3.decommission(force=force)
 
         time.sleep(5)
-        for x in xrange(0, 100):
+        for x in range(0, 100):
             query_c1c2(session, x, ConsistencyLevel.ONE)
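
With the tools.decorators helpers gone, @since and @pytest.mark.no_vnodes are plain pytest marks that conftest logic must interpret. A hypothetical conftest sketch of how a since mark can be turned into a skip (not part of this diff; the real project conftest is more involved, and CLUSTER_VERSION here is a made-up constant):

    # conftest.py -- hypothetical sketch
    import pytest
    from distutils.version import LooseVersion

    CLUSTER_VERSION = LooseVersion('3.11')

    def pytest_collection_modifyitems(items):
        for item in items:
            marker = item.get_closest_marker('since')
            if marker and LooseVersion(marker.args[0]) > CLUSTER_VERSION:
                # Skip tests that require a newer Cassandra than we run.
                item.add_marker(pytest.mark.skip(
                    reason='requires Cassandra >= %s' % marker.args[0]))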

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/internode_ssl_test.py
----------------------------------------------------------------------
diff --git a/internode_ssl_test.py b/internode_ssl_test.py
index 4149d26..fff9985 100644
--- a/internode_ssl_test.py
+++ b/internode_ssl_test.py
@@ -1,11 +1,15 @@
-from dtest import Tester, debug, create_ks, create_cf
+import logging
+
+from dtest import Tester, create_ks, create_cf
 from tools.data import putget
 from tools.misc import generate_ssl_stores
 
+logger = logging.getLogger(__name__)
+
 
 class TestInternodeSSL(Tester):
 
-    def putget_with_internode_ssl_test(self):
+    def test_putget_with_internode_ssl(self):
         """
         Simple putget test with internode ssl enabled
         with default 'all' internode compression
@@ -13,7 +17,7 @@ class TestInternodeSSL(Tester):
         """
         self.__putget_with_internode_ssl_test('all')
 
-    def putget_with_internode_ssl_without_compression_test(self):
+    def test_putget_with_internode_ssl_without_compression(self):
         """
         Simple putget test with internode ssl enabled
         without internode compression
@@ -24,10 +28,10 @@ class TestInternodeSSL(Tester):
     def __putget_with_internode_ssl_test(self, internode_compression):
         cluster = self.cluster
 
-        debug("***using internode ssl***")
-        generate_ssl_stores(self.test_path)
+        logger.debug("***using internode ssl***")
+        generate_ssl_stores(self.fixture_dtest_setup.test_path)
         cluster.set_configuration_options({'internode_compression': internode_compression})
-        cluster.enable_internode_ssl(self.test_path)
+        cluster.enable_internode_ssl(self.fixture_dtest_setup.test_path)
 
         cluster.populate(3).start()
 

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/jmx_auth_test.py
----------------------------------------------------------------------
diff --git a/jmx_auth_test.py b/jmx_auth_test.py
index 99e7d80..99b227f 100644
--- a/jmx_auth_test.py
+++ b/jmx_auth_test.py
@@ -1,16 +1,19 @@
+import pytest
+import logging
 from distutils.version import LooseVersion
 
 from ccmlib.node import ToolError
-
 from dtest import Tester
-from tools.decorators import since
 from tools.jmxutils import apply_jmx_authentication
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
 
 @since('3.6')
 class TestJMXAuth(Tester):
 
-    def basic_auth_test(self):
+    def test_basic_auth(self):
         """
         Some basic smoke testing of JMX authentication and authorization.
         Uses nodetool as a means of exercising the JMX interface as JolokiaAgent
@@ -29,21 +32,21 @@ class TestJMXAuth(Tester):
         session.execute("GRANT DESCRIBE ON ALL MBEANS TO jmx_user")
         session.execute("CREATE ROLE test WITH LOGIN=true and PASSWORD='abc123'")
 
-        with self.assertRaisesRegexp(ToolError, self.authentication_fail_message(node, 'baduser')):
+        with pytest.raises(ToolError, match=self.authentication_fail_message(node, 'baduser')):
             node.nodetool('-u baduser -pw abc123 gossipinfo')
 
-        with self.assertRaisesRegexp(ToolError, self.authentication_fail_message(node, 'test')):
+        with pytest.raises(ToolError, match=self.authentication_fail_message(node, 'test')):
             node.nodetool('-u test -pw badpassword gossipinfo')
 
-        with self.assertRaisesRegexp(ToolError, "Required key 'username' is missing"):
+        with pytest.raises(ToolError, matches="Required key 'username' is missing"):
             node.nodetool('gossipinfo')
 
         # role must have LOGIN attribute
-        with self.assertRaisesRegexp(ToolError, 'jmx_user is not permitted to log in'):
+        with pytest.raises(ToolError, match='jmx_user is not permitted to log in'):
             node.nodetool('-u jmx_user -pw 321cba gossipinfo')
 
         # test doesn't yet have any privileges on the necessary JMX resources
-        with self.assertRaisesRegexp(ToolError, 'Access Denied'):
+        with pytest.raises(ToolError, match='Access Denied'):
             node.nodetool('-u test -pw abc123 gossipinfo')
 
         session.execute("GRANT jmx_user TO test")

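A note on the pytest.raises conversions above: the match keyword applies re.search to the string of the raised exception, mirroring the old assertRaisesRegexp semantics. A minimal standalone sketch (not part of the commit):

    import pytest

    def test_match_uses_re_search():
        # match= is treated as a regular expression, not an exact string
        with pytest.raises(ValueError, match="invalid literal"):
            int("not a number")
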



[14/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/thrift_bindings/thrift010/ttypes.py
----------------------------------------------------------------------
diff --git a/thrift_bindings/thrift010/ttypes.py b/thrift_bindings/thrift010/ttypes.py
new file mode 100644
index 0000000..c962034
--- /dev/null
+++ b/thrift_bindings/thrift010/ttypes.py
@@ -0,0 +1,4218 @@
+#
+# Autogenerated by Thrift Compiler (0.10.0)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+#  options string: py
+#
+
+from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
+from thrift.protocol.TProtocol import TProtocolException
+import sys
+
+from thrift.transport import TTransport
+
+
+class ConsistencyLevel(object):
+    """
+    The ConsistencyLevel is an enum that controls both read and write
+    behavior based on the ReplicationFactor of the keyspace.  The
+    different consistency levels have different meanings, depending on
+    if you're doing a write or read operation.
+
+    If W + R > ReplicationFactor, where W is the number of nodes to
+    block for on write, and R the number to block for on reads, you
+    will have strongly consistent behavior; that is, readers will
+    always see the most recent write. Of these, the most interesting is
+    to do QUORUM reads and writes, which gives you consistency while
+    still allowing availability in the face of node failures up to half
+    of <ReplicationFactor>. Of course if latency is more important than
+    consistency then you can use lower values for either or both.
+
+    Some ConsistencyLevels (ONE, TWO, THREE) refer to a specific number
+    of replicas rather than a logical concept that adjusts
+    automatically with the replication factor.  Of these, only ONE is
+    commonly used; TWO and (even more rarely) THREE are only useful
+    when you care more about guaranteeing a certain level of
+    durability than consistency.
+
+    Write consistency levels make the following guarantees before reporting success to the client:
+      ANY          Ensure that the write has been written once somewhere, including possibly being hinted in a non-target node.
+      ONE          Ensure that the write has been written to at least 1 node's commit log and memory table
+      TWO          Ensure that the write has been written to at least 2 nodes' commit logs and memory tables
+      THREE        Ensure that the write has been written to at least 3 nodes' commit logs and memory tables
+      QUORUM       Ensure that the write has been written to <ReplicationFactor> / 2 + 1 nodes
+      LOCAL_ONE    Ensure that the write has been written to 1 node within the local datacenter (requires NetworkTopologyStrategy)
+      LOCAL_QUORUM Ensure that the write has been written to <ReplicationFactor> / 2 + 1 nodes, within the local datacenter (requires NetworkTopologyStrategy)
+      EACH_QUORUM  Ensure that the write has been written to <ReplicationFactor> / 2 + 1 nodes in each datacenter (requires NetworkTopologyStrategy)
+      ALL          Ensure that the write is written to <code>&lt;ReplicationFactor&gt;</code> nodes before responding to the client.
+
+    Read consistency levels make the following guarantees before returning successful results to the client:
+      ANY          Not supported. You probably want ONE instead.
+      ONE          Returns the record obtained from a single replica.
+      TWO          Returns the record with the most recent timestamp once two replicas have replied.
+      THREE        Returns the record with the most recent timestamp once three replicas have replied.
+      QUORUM       Returns the record with the most recent timestamp once a majority of replicas have replied.
+      LOCAL_ONE    Returns the record with the most recent timestamp once a single replica within the local datacenter has replied.
+      LOCAL_QUORUM Returns the record with the most recent timestamp once a majority of replicas within the local datacenter have replied.
+      EACH_QUORUM  Returns the record with the most recent timestamp once a majority of replicas within each datacenter have replied.
+      ALL          Returns the record with the most recent timestamp once all replicas have replied (implies no replica may be down).
+    """
+    ONE = 1
+    QUORUM = 2
+    LOCAL_QUORUM = 3
+    EACH_QUORUM = 4
+    ALL = 5
+    ANY = 6
+    TWO = 7
+    THREE = 8
+    SERIAL = 9
+    LOCAL_SERIAL = 10
+    LOCAL_ONE = 11
+
+    _VALUES_TO_NAMES = {
+        1: "ONE",
+        2: "QUORUM",
+        3: "LOCAL_QUORUM",
+        4: "EACH_QUORUM",
+        5: "ALL",
+        6: "ANY",
+        7: "TWO",
+        8: "THREE",
+        9: "SERIAL",
+        10: "LOCAL_SERIAL",
+        11: "LOCAL_ONE",
+    }
+
+    _NAMES_TO_VALUES = {
+        "ONE": 1,
+        "QUORUM": 2,
+        "LOCAL_QUORUM": 3,
+        "EACH_QUORUM": 4,
+        "ALL": 5,
+        "ANY": 6,
+        "TWO": 7,
+        "THREE": 8,
+        "SERIAL": 9,
+        "LOCAL_SERIAL": 10,
+        "LOCAL_ONE": 11,
+    }
+
+
+class IndexOperator(object):
+    EQ = 0
+    GTE = 1
+    GT = 2
+    LTE = 3
+    LT = 4
+
+    _VALUES_TO_NAMES = {
+        0: "EQ",
+        1: "GTE",
+        2: "GT",
+        3: "LTE",
+        4: "LT",
+    }
+
+    _NAMES_TO_VALUES = {
+        "EQ": 0,
+        "GTE": 1,
+        "GT": 2,
+        "LTE": 3,
+        "LT": 4,
+    }
+
+
+class IndexType(object):
+    KEYS = 0
+    CUSTOM = 1
+    COMPOSITES = 2
+
+    _VALUES_TO_NAMES = {
+        0: "KEYS",
+        1: "CUSTOM",
+        2: "COMPOSITES",
+    }
+
+    _NAMES_TO_VALUES = {
+        "KEYS": 0,
+        "CUSTOM": 1,
+        "COMPOSITES": 2,
+    }
+
+
+class Compression(object):
+    """
+    CQL query compression
+    """
+    GZIP = 1
+    NONE = 2
+
+    _VALUES_TO_NAMES = {
+        1: "GZIP",
+        2: "NONE",
+    }
+
+    _NAMES_TO_VALUES = {
+        "GZIP": 1,
+        "NONE": 2,
+    }
+
+
+class CqlResultType(object):
+    ROWS = 1
+    VOID = 2
+    INT = 3
+
+    _VALUES_TO_NAMES = {
+        1: "ROWS",
+        2: "VOID",
+        3: "INT",
+    }
+
+    _NAMES_TO_VALUES = {
+        "ROWS": 1,
+        "VOID": 2,
+        "INT": 3,
+    }
+
+
+class Column(object):
+    """
+    Basic unit of data within a ColumnFamily.
+    @param name, the name by which this column is set and retrieved.  Maximum 64KB long.
+    @param value. The data associated with the name.  Maximum 2GB long, but in practice you should limit it to small numbers of MB (since Thrift must read the full value into memory to operate on it).
+    @param timestamp. The timestamp is used for conflict detection/resolution when two columns with same name need to be compared.
+    @param ttl. An optional, positive delay (in seconds) after which the column will be automatically deleted.
+
+    Attributes:
+     - name
+     - value
+     - timestamp
+     - ttl
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.STRING, 'name', 'BINARY', None, ),  # 1
+        (2, TType.STRING, 'value', 'BINARY', None, ),  # 2
+        (3, TType.I64, 'timestamp', None, None, ),  # 3
+        (4, TType.I32, 'ttl', None, None, ),  # 4
+    )
+
+    def __init__(self, name=None, value=None, timestamp=None, ttl=None,):
+        self.name = name
+        self.value = value
+        self.timestamp = timestamp
+        self.ttl = ttl
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.STRING:
+                    self.name = iprot.readBinary()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.STRING:
+                    self.value = iprot.readBinary()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 3:
+                if ftype == TType.I64:
+                    self.timestamp = iprot.readI64()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 4:
+                if ftype == TType.I32:
+                    self.ttl = iprot.readI32()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('Column')
+        if self.name is not None:
+            oprot.writeFieldBegin('name', TType.STRING, 1)
+            oprot.writeBinary(self.name)
+            oprot.writeFieldEnd()
+        if self.value is not None:
+            oprot.writeFieldBegin('value', TType.STRING, 2)
+            oprot.writeBinary(self.value)
+            oprot.writeFieldEnd()
+        if self.timestamp is not None:
+            oprot.writeFieldBegin('timestamp', TType.I64, 3)
+            oprot.writeI64(self.timestamp)
+            oprot.writeFieldEnd()
+        if self.ttl is not None:
+            oprot.writeFieldBegin('ttl', TType.I32, 4)
+            oprot.writeI32(self.ttl)
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        if self.name is None:
+            raise TProtocolException(message='Required field name is unset!')
+        return
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class SuperColumn(object):
+    """
+    A named list of columns.
+    @param name. see Column.name.
+    @param columns. A collection of standard Columns.  The columns within a super column are defined in an adhoc manner.
+                    Columns within a super column do not have to have matching structures (similarly named child columns).
+
+    Attributes:
+     - name
+     - columns
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.STRING, 'name', 'BINARY', None, ),  # 1
+        (2, TType.LIST, 'columns', (TType.STRUCT, (Column, Column.thrift_spec), False), None, ),  # 2
+    )
+
+    def __init__(self, name=None, columns=None,):
+        self.name = name
+        self.columns = columns
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.STRING:
+                    self.name = iprot.readBinary()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.LIST:
+                    self.columns = []
+                    (_etype3, _size0) = iprot.readListBegin()
+                    for _i4 in range(_size0):
+                        _elem5 = Column()
+                        _elem5.read(iprot)
+                        self.columns.append(_elem5)
+                    iprot.readListEnd()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('SuperColumn')
+        if self.name is not None:
+            oprot.writeFieldBegin('name', TType.STRING, 1)
+            oprot.writeBinary(self.name)
+            oprot.writeFieldEnd()
+        if self.columns is not None:
+            oprot.writeFieldBegin('columns', TType.LIST, 2)
+            oprot.writeListBegin(TType.STRUCT, len(self.columns))
+            for iter6 in self.columns:
+                iter6.write(oprot)
+            oprot.writeListEnd()
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        if self.name is None:
+            raise TProtocolException(message='Required field name is unset!')
+        if self.columns is None:
+            raise TProtocolException(message='Required field columns is unset!')
+        return
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class CounterColumn(object):
+    """
+    Attributes:
+     - name
+     - value
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.STRING, 'name', 'BINARY', None, ),  # 1
+        (2, TType.I64, 'value', None, None, ),  # 2
+    )
+
+    def __init__(self, name=None, value=None,):
+        self.name = name
+        self.value = value
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.STRING:
+                    self.name = iprot.readBinary()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.I64:
+                    self.value = iprot.readI64()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('CounterColumn')
+        if self.name is not None:
+            oprot.writeFieldBegin('name', TType.STRING, 1)
+            oprot.writeBinary(self.name)
+            oprot.writeFieldEnd()
+        if self.value is not None:
+            oprot.writeFieldBegin('value', TType.I64, 2)
+            oprot.writeI64(self.value)
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        if self.name is None:
+            raise TProtocolException(message='Required field name is unset!')
+        if self.value is None:
+            raise TProtocolException(message='Required field value is unset!')
+        return
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class CounterSuperColumn(object):
+    """
+    Attributes:
+     - name
+     - columns
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.STRING, 'name', 'BINARY', None, ),  # 1
+        (2, TType.LIST, 'columns', (TType.STRUCT, (CounterColumn, CounterColumn.thrift_spec), False), None, ),  # 2
+    )
+
+    def __init__(self, name=None, columns=None,):
+        self.name = name
+        self.columns = columns
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.STRING:
+                    self.name = iprot.readBinary()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.LIST:
+                    self.columns = []
+                    (_etype10, _size7) = iprot.readListBegin()
+                    for _i11 in range(_size7):
+                        _elem12 = CounterColumn()
+                        _elem12.read(iprot)
+                        self.columns.append(_elem12)
+                    iprot.readListEnd()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('CounterSuperColumn')
+        if self.name is not None:
+            oprot.writeFieldBegin('name', TType.STRING, 1)
+            oprot.writeBinary(self.name)
+            oprot.writeFieldEnd()
+        if self.columns is not None:
+            oprot.writeFieldBegin('columns', TType.LIST, 2)
+            oprot.writeListBegin(TType.STRUCT, len(self.columns))
+            for iter13 in self.columns:
+                iter13.write(oprot)
+            oprot.writeListEnd()
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        if self.name is None:
+            raise TProtocolException(message='Required field name is unset!')
+        if self.columns is None:
+            raise TProtocolException(message='Required field columns is unset!')
+        return
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class ColumnOrSuperColumn(object):
+    """
+    Methods for fetching rows/records from Cassandra will return either a single instance of ColumnOrSuperColumn or a list
+    of ColumnOrSuperColumns (get_slice()). If you're looking up a SuperColumn (or list of SuperColumns) then the resulting
+    instances of ColumnOrSuperColumn will have the requested SuperColumn in the attribute super_column. For queries resulting
+    in Columns, those values will be in the attribute column. This change was made between 0.3 and 0.4 to standardize on
+    single query methods that may return either a SuperColumn or Column.
+
+    If the query was on a counter column family, you will either get a counter_column (instead of a column) or a
+    counter_super_column (instead of a super_column)
+
+    @param column. The Column returned by get() or get_slice().
+    @param super_column. The SuperColumn returned by get() or get_slice().
+    @param counter_column. The CounterColumn returned by get() or get_slice().
+    @param counter_super_column. The CounterSuperColumn returned by get() or get_slice().
+
+    Attributes:
+     - column
+     - super_column
+     - counter_column
+     - counter_super_column
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.STRUCT, 'column', (Column, Column.thrift_spec), None, ),  # 1
+        (2, TType.STRUCT, 'super_column', (SuperColumn, SuperColumn.thrift_spec), None, ),  # 2
+        (3, TType.STRUCT, 'counter_column', (CounterColumn, CounterColumn.thrift_spec), None, ),  # 3
+        (4, TType.STRUCT, 'counter_super_column', (CounterSuperColumn, CounterSuperColumn.thrift_spec), None, ),  # 4
+    )
+
+    def __init__(self, column=None, super_column=None, counter_column=None, counter_super_column=None,):
+        self.column = column
+        self.super_column = super_column
+        self.counter_column = counter_column
+        self.counter_super_column = counter_super_column
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.STRUCT:
+                    self.column = Column()
+                    self.column.read(iprot)
+                else:
+                    iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.STRUCT:
+                    self.super_column = SuperColumn()
+                    self.super_column.read(iprot)
+                else:
+                    iprot.skip(ftype)
+            elif fid == 3:
+                if ftype == TType.STRUCT:
+                    self.counter_column = CounterColumn()
+                    self.counter_column.read(iprot)
+                else:
+                    iprot.skip(ftype)
+            elif fid == 4:
+                if ftype == TType.STRUCT:
+                    self.counter_super_column = CounterSuperColumn()
+                    self.counter_super_column.read(iprot)
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('ColumnOrSuperColumn')
+        if self.column is not None:
+            oprot.writeFieldBegin('column', TType.STRUCT, 1)
+            self.column.write(oprot)
+            oprot.writeFieldEnd()
+        if self.super_column is not None:
+            oprot.writeFieldBegin('super_column', TType.STRUCT, 2)
+            self.super_column.write(oprot)
+            oprot.writeFieldEnd()
+        if self.counter_column is not None:
+            oprot.writeFieldBegin('counter_column', TType.STRUCT, 3)
+            self.counter_column.write(oprot)
+            oprot.writeFieldEnd()
+        if self.counter_super_column is not None:
+            oprot.writeFieldBegin('counter_super_column', TType.STRUCT, 4)
+            self.counter_super_column.write(oprot)
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        return
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class NotFoundException(TException):
+    """
+    A specific column was requested that does not exist.
+    """
+
+    thrift_spec = (
+    )
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('NotFoundException')
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        return
+
+    def __str__(self):
+        return repr(self)
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class InvalidRequestException(TException):
+    """
+    Invalid request could mean keyspace or column family does not exist, required parameters are missing, or a parameter is malformed.
+    The 'why' field contains an associated error message.
+
+    Attributes:
+     - why
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.STRING, 'why', 'UTF8', None, ),  # 1
+    )
+
+    def __init__(self, why=None,):
+        self.why = why
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.STRING:
+                    self.why = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('InvalidRequestException')
+        if self.why is not None:
+            oprot.writeFieldBegin('why', TType.STRING, 1)
+            oprot.writeString(self.why.encode('utf-8') if sys.version_info[0] == 2 else self.why)
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        if self.why is None:
+            raise TProtocolException(message='Required field why is unset!')
+        return
+
+    def __str__(self):
+        return repr(self)
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class UnavailableException(TException):
+    """
+    Not all the replicas required could be created and/or read.
+    """
+
+    thrift_spec = (
+    )
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('UnavailableException')
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        return
+
+    def __str__(self):
+        return repr(self)
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class TimedOutException(TException):
+    """
+    RPC timeout was exceeded. Either a node failed mid-operation, or load was too high, or the requested op was too large.
+
+    Attributes:
+     - acknowledged_by: if a write operation was acknowledged by some replicas but not by enough to
+    satisfy the required ConsistencyLevel, the number of successful
+    replies will be given here. In case of atomic_batch_mutate method this field
+    will be set to -1 if the batch was written to the batchlog and to 0 if it wasn't.
+     - acknowledged_by_batchlog: in case of atomic_batch_mutate method this field tells if the batch
+    was written to the batchlog.
+     - paxos_in_progress: for the CAS method, this field tells if we timed out during the paxos
+    protocol, as opposed to during the commit of our update
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.I32, 'acknowledged_by', None, None, ),  # 1
+        (2, TType.BOOL, 'acknowledged_by_batchlog', None, None, ),  # 2
+        (3, TType.BOOL, 'paxos_in_progress', None, None, ),  # 3
+    )
+
+    def __init__(self, acknowledged_by=None, acknowledged_by_batchlog=None, paxos_in_progress=None,):
+        self.acknowledged_by = acknowledged_by
+        self.acknowledged_by_batchlog = acknowledged_by_batchlog
+        self.paxos_in_progress = paxos_in_progress
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.I32:
+                    self.acknowledged_by = iprot.readI32()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.BOOL:
+                    self.acknowledged_by_batchlog = iprot.readBool()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 3:
+                if ftype == TType.BOOL:
+                    self.paxos_in_progress = iprot.readBool()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('TimedOutException')
+        if self.acknowledged_by is not None:
+            oprot.writeFieldBegin('acknowledged_by', TType.I32, 1)
+            oprot.writeI32(self.acknowledged_by)
+            oprot.writeFieldEnd()
+        if self.acknowledged_by_batchlog is not None:
+            oprot.writeFieldBegin('acknowledged_by_batchlog', TType.BOOL, 2)
+            oprot.writeBool(self.acknowledged_by_batchlog)
+            oprot.writeFieldEnd()
+        if self.paxos_in_progress is not None:
+            oprot.writeFieldBegin('paxos_in_progress', TType.BOOL, 3)
+            oprot.writeBool(self.paxos_in_progress)
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        return
+
+    def __str__(self):
+        return repr(self)
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class AuthenticationException(TException):
+    """
+    invalid authentication request (invalid keyspace, user does not exist, or credentials invalid)
+
+    Attributes:
+     - why
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.STRING, 'why', 'UTF8', None, ),  # 1
+    )
+
+    def __init__(self, why=None,):
+        self.why = why
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.STRING:
+                    self.why = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('AuthenticationException')
+        if self.why is not None:
+            oprot.writeFieldBegin('why', TType.STRING, 1)
+            oprot.writeString(self.why.encode('utf-8') if sys.version_info[0] == 2 else self.why)
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        if self.why is None:
+            raise TProtocolException(message='Required field why is unset!')
+        return
+
+    def __str__(self):
+        return repr(self)
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class AuthorizationException(TException):
+    """
+    invalid authorization request (user does not have access to keyspace)
+
+    Attributes:
+     - why
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.STRING, 'why', 'UTF8', None, ),  # 1
+    )
+
+    def __init__(self, why=None,):
+        self.why = why
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.STRING:
+                    self.why = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('AuthorizationException')
+        if self.why is not None:
+            oprot.writeFieldBegin('why', TType.STRING, 1)
+            oprot.writeString(self.why.encode('utf-8') if sys.version_info[0] == 2 else self.why)
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        if self.why is None:
+            raise TProtocolException(message='Required field why is unset!')
+        return
+
+    def __str__(self):
+        return repr(self)
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class SchemaDisagreementException(TException):
+    """
+    NOTE: This is an outdated exception left for backward compatibility reasons;
+    no actual schema agreement validation is done starting from Cassandra 1.2
+
+    schemas are not in agreement across all nodes
+    """
+
+    thrift_spec = (
+    )
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('SchemaDisagreementException')
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        return
+
+    def __str__(self):
+        return repr(self)
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class ColumnParent(object):
+    """
+    ColumnParent is used when selecting groups of columns from the same ColumnFamily. In directory structure terms, imagine
+    ColumnParent as ColumnPath + '/../'.
+
+    See also <a href="cassandra.html#Struct_ColumnPath">ColumnPath</a>
+
+    Attributes:
+     - column_family
+     - super_column
+    """
+
+    thrift_spec = (
+        None,  # 0
+        None,  # 1
+        None,  # 2
+        (3, TType.STRING, 'column_family', 'UTF8', None, ),  # 3
+        (4, TType.STRING, 'super_column', 'BINARY', None, ),  # 4
+    )
+
+    def __init__(self, column_family=None, super_column=None,):
+        self.column_family = column_family
+        self.super_column = super_column
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 3:
+                if ftype == TType.STRING:
+                    self.column_family = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 4:
+                if ftype == TType.STRING:
+                    self.super_column = iprot.readBinary()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('ColumnParent')
+        if self.column_family is not None:
+            oprot.writeFieldBegin('column_family', TType.STRING, 3)
+            oprot.writeString(self.column_family.encode('utf-8') if sys.version_info[0] == 2 else self.column_family)
+            oprot.writeFieldEnd()
+        if self.super_column is not None:
+            oprot.writeFieldBegin('super_column', TType.STRING, 4)
+            oprot.writeBinary(self.super_column)
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        if self.column_family is None:
+            raise TProtocolException(message='Required field column_family is unset!')
+        return
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class ColumnPath(object):
+    """
+    The ColumnPath is the path to a single column in Cassandra. It might make sense to think of ColumnPath and
+    ColumnParent in terms of a directory structure.
+
+    ColumnPath is used to look up a single column.
+
+    @param column_family. The name of the CF of the column being looked up.
+    @param super_column. The super column name.
+    @param column. The column name.
+
+    Attributes:
+     - column_family
+     - super_column
+     - column
+    """
+
+    thrift_spec = (
+        None,  # 0
+        None,  # 1
+        None,  # 2
+        (3, TType.STRING, 'column_family', 'UTF8', None, ),  # 3
+        (4, TType.STRING, 'super_column', 'BINARY', None, ),  # 4
+        (5, TType.STRING, 'column', 'BINARY', None, ),  # 5
+    )
+
+    def __init__(self, column_family=None, super_column=None, column=None,):
+        self.column_family = column_family
+        self.super_column = super_column
+        self.column = column
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 3:
+                if ftype == TType.STRING:
+                    self.column_family = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 4:
+                if ftype == TType.STRING:
+                    self.super_column = iprot.readBinary()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 5:
+                if ftype == TType.STRING:
+                    self.column = iprot.readBinary()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('ColumnPath')
+        if self.column_family is not None:
+            oprot.writeFieldBegin('column_family', TType.STRING, 3)
+            oprot.writeString(self.column_family.encode('utf-8') if sys.version_info[0] == 2 else self.column_family)
+            oprot.writeFieldEnd()
+        if self.super_column is not None:
+            oprot.writeFieldBegin('super_column', TType.STRING, 4)
+            oprot.writeBinary(self.super_column)
+            oprot.writeFieldEnd()
+        if self.column is not None:
+            oprot.writeFieldBegin('column', TType.STRING, 5)
+            oprot.writeBinary(self.column)
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        if self.column_family is None:
+            raise TProtocolException(message='Required field column_family is unset!')
+        return
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class SliceRange(object):
+    """
+    A slice range is a structure that stores basic range, ordering and limit information for a query that will return
+    multiple columns. It could be thought of as Cassandra's version of LIMIT and ORDER BY.
+
+    @param start. The column name to start the slice with. This attribute is not required, though there is no default value,
+                  and can be safely set to '', i.e., an empty byte array, to start with the first column name. Otherwise, it
+                  must be a valid value under the rules of the Comparator defined for the given ColumnFamily.
+    @param finish. The column name to stop the slice at. This attribute is not required, though there is no default value,
+                   and can be safely set to an empty byte array to not stop until 'count' results are seen. Otherwise, it
+                   must also be a valid value to the ColumnFamily Comparator.
+    @param reversed. Whether the results should be ordered in reversed order. Similar to ORDER BY blah DESC in SQL.
+    @param count. How many columns to return. Similar to LIMIT in SQL. May be arbitrarily large, but Thrift will
+                  materialize the whole result into memory before returning it to the client, so be aware that you may
+                  be better served by iterating through slices by passing the last value of one call in as the 'start'
+                  of the next instead of increasing 'count' arbitrarily large.
+
+    Attributes:
+     - start
+     - finish
+     - reversed
+     - count
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.STRING, 'start', 'BINARY', None, ),  # 1
+        (2, TType.STRING, 'finish', 'BINARY', None, ),  # 2
+        (3, TType.BOOL, 'reversed', None, False, ),  # 3
+        (4, TType.I32, 'count', None, 100, ),  # 4
+    )
+
+    def __init__(self, start=None, finish=None, reversed=thrift_spec[3][4], count=thrift_spec[4][4],):
+        self.start = start
+        self.finish = finish
+        self.reversed = reversed
+        self.count = count
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.STRING:
+                    self.start = iprot.readBinary()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.STRING:
+                    self.finish = iprot.readBinary()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 3:
+                if ftype == TType.BOOL:
+                    self.reversed = iprot.readBool()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 4:
+                if ftype == TType.I32:
+                    self.count = iprot.readI32()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('SliceRange')
+        if self.start is not None:
+            oprot.writeFieldBegin('start', TType.STRING, 1)
+            oprot.writeBinary(self.start)
+            oprot.writeFieldEnd()
+        if self.finish is not None:
+            oprot.writeFieldBegin('finish', TType.STRING, 2)
+            oprot.writeBinary(self.finish)
+            oprot.writeFieldEnd()
+        if self.reversed is not None:
+            oprot.writeFieldBegin('reversed', TType.BOOL, 3)
+            oprot.writeBool(self.reversed)
+            oprot.writeFieldEnd()
+        if self.count is not None:
+            oprot.writeFieldBegin('count', TType.I32, 4)
+            oprot.writeI32(self.count)
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        if self.start is None:
+            raise TProtocolException(message='Required field start is unset!')
+        if self.finish is None:
+            raise TProtocolException(message='Required field finish is unset!')
+        if self.reversed is None:
+            raise TProtocolException(message='Required field reversed is unset!')
+        if self.count is None:
+            raise TProtocolException(message='Required field count is unset!')
+        return
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class SlicePredicate(object):
+    """
+    A SlicePredicate is similar to a mathematical predicate (see http://en.wikipedia.org/wiki/Predicate_(mathematical_logic)),
+    which is described as "a property that the elements of a set have in common."
+
+    SlicePredicate's in Cassandra are described with either a list of column_names or a SliceRange.  If column_names is
+    specified, slice_range is ignored.
+
+    @param column_names. A list of column names to retrieve. This can be used similarly to Memcached's "multi-get" feature
+                        to fetch N known column names. For instance, if you know you wish to fetch columns 'Joe', 'Jack',
+                        and 'Jim' you can pass those column names as a list to fetch all three at once.
+    @param slice_range. A SliceRange describing how to range, order, and/or limit the slice.
+
+    Attributes:
+     - column_names
+     - slice_range
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.LIST, 'column_names', (TType.STRING, 'BINARY', False), None, ),  # 1
+        (2, TType.STRUCT, 'slice_range', (SliceRange, SliceRange.thrift_spec), None, ),  # 2
+    )
+
+    def __init__(self, column_names=None, slice_range=None,):
+        self.column_names = column_names
+        self.slice_range = slice_range
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.LIST:
+                    self.column_names = []
+                    (_etype17, _size14) = iprot.readListBegin()
+                    for _i18 in range(_size14):
+                        _elem19 = iprot.readBinary()
+                        self.column_names.append(_elem19)
+                    iprot.readListEnd()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.STRUCT:
+                    self.slice_range = SliceRange()
+                    self.slice_range.read(iprot)
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('SlicePredicate')
+        if self.column_names is not None:
+            oprot.writeFieldBegin('column_names', TType.LIST, 1)
+            oprot.writeListBegin(TType.STRING, len(self.column_names))
+            for iter20 in self.column_names:
+                oprot.writeBinary(iter20)
+            oprot.writeListEnd()
+            oprot.writeFieldEnd()
+        if self.slice_range is not None:
+            oprot.writeFieldBegin('slice_range', TType.STRUCT, 2)
+            self.slice_range.write(oprot)
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        return
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class IndexExpression(object):
+    """
+    Attributes:
+     - column_name
+     - op
+     - value
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.STRING, 'column_name', 'BINARY', None, ),  # 1
+        (2, TType.I32, 'op', None, None, ),  # 2
+        (3, TType.STRING, 'value', 'BINARY', None, ),  # 3
+    )
+
+    def __init__(self, column_name=None, op=None, value=None,):
+        self.column_name = column_name
+        self.op = op
+        self.value = value
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.STRING:
+                    self.column_name = iprot.readBinary()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.I32:
+                    self.op = iprot.readI32()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 3:
+                if ftype == TType.STRING:
+                    self.value = iprot.readBinary()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('IndexExpression')
+        if self.column_name is not None:
+            oprot.writeFieldBegin('column_name', TType.STRING, 1)
+            oprot.writeBinary(self.column_name)
+            oprot.writeFieldEnd()
+        if self.op is not None:
+            oprot.writeFieldBegin('op', TType.I32, 2)
+            oprot.writeI32(self.op)
+            oprot.writeFieldEnd()
+        if self.value is not None:
+            oprot.writeFieldBegin('value', TType.STRING, 3)
+            oprot.writeBinary(self.value)
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        if self.column_name is None:
+            raise TProtocolException(message='Required field column_name is unset!')
+        if self.op is None:
+            raise TProtocolException(message='Required field op is unset!')
+        if self.value is None:
+            raise TProtocolException(message='Required field value is unset!')
+        return
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class IndexClause(object):
+    """
+    @deprecated use a KeyRange with row_filter in get_range_slices instead
+
+    Attributes:
+     - expressions
+     - start_key
+     - count
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.LIST, 'expressions', (TType.STRUCT, (IndexExpression, IndexExpression.thrift_spec), False), None, ),  # 1
+        (2, TType.STRING, 'start_key', 'BINARY', None, ),  # 2
+        (3, TType.I32, 'count', None, 100, ),  # 3
+    )
+
+    def __init__(self, expressions=None, start_key=None, count=thrift_spec[3][4],):
+        self.expressions = expressions
+        self.start_key = start_key
+        self.count = count
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.LIST:
+                    self.expressions = []
+                    (_etype24, _size21) = iprot.readListBegin()
+                    for _i25 in range(_size21):
+                        _elem26 = IndexExpression()
+                        _elem26.read(iprot)
+                        self.expressions.append(_elem26)
+                    iprot.readListEnd()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.STRING:
+                    self.start_key = iprot.readBinary()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 3:
+                if ftype == TType.I32:
+                    self.count = iprot.readI32()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('IndexClause')
+        if self.expressions is not None:
+            oprot.writeFieldBegin('expressions', TType.LIST, 1)
+            oprot.writeListBegin(TType.STRUCT, len(self.expressions))
+            for iter27 in self.expressions:
+                iter27.write(oprot)
+            oprot.writeListEnd()
+            oprot.writeFieldEnd()
+        if self.start_key is not None:
+            oprot.writeFieldBegin('start_key', TType.STRING, 2)
+            oprot.writeBinary(self.start_key)
+            oprot.writeFieldEnd()
+        if self.count is not None:
+            oprot.writeFieldBegin('count', TType.I32, 3)
+            oprot.writeI32(self.count)
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        if self.expressions is None:
+            raise TProtocolException(message='Required field expressions is unset!')
+        if self.start_key is None:
+            raise TProtocolException(message='Required field start_key is unset!')
+        if self.count is None:
+            raise TProtocolException(message='Required field count is unset!')
+        return
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class KeyRange(object):
+    """
+    The semantics of start keys and tokens are slightly different.
+    Keys are start-inclusive; tokens are start-exclusive.  Token
+    ranges may also wrap -- that is, the end token may be less
+    than the start one.  Thus, a range from keyX to keyX is a
+    one-element range, but a range from tokenY to tokenY is the
+    full ring.
+
+    Attributes:
+     - start_key
+     - end_key
+     - start_token
+     - end_token
+     - row_filter
+     - count
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.STRING, 'start_key', 'BINARY', None, ),  # 1
+        (2, TType.STRING, 'end_key', 'BINARY', None, ),  # 2
+        (3, TType.STRING, 'start_token', 'UTF8', None, ),  # 3
+        (4, TType.STRING, 'end_token', 'UTF8', None, ),  # 4
+        (5, TType.I32, 'count', None, 100, ),  # 5
+        (6, TType.LIST, 'row_filter', (TType.STRUCT, (IndexExpression, IndexExpression.thrift_spec), False), None, ),  # 6
+    )
+
+    def __init__(self, start_key=None, end_key=None, start_token=None, end_token=None, row_filter=None, count=thrift_spec[5][4],):
+        self.start_key = start_key
+        self.end_key = end_key
+        self.start_token = start_token
+        self.end_token = end_token
+        self.row_filter = row_filter
+        self.count = count
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.STRING:
+                    self.start_key = iprot.readBinary()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.STRING:
+                    self.end_key = iprot.readBinary()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 3:
+                if ftype == TType.STRING:
+                    self.start_token = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 4:
+                if ftype == TType.STRING:
+                    self.end_token = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 6:
+                if ftype == TType.LIST:
+                    self.row_filter = []
+                    (_etype31, _size28) = iprot.readListBegin()
+                    for _i32 in range(_size28):
+                        _elem33 = IndexExpression()
+                        _elem33.read(iprot)
+                        self.row_filter.append(_elem33)
+                    iprot.readListEnd()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 5:
+                if ftype == TType.I32:
+                    self.count = iprot.readI32()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('KeyRange')
+        if self.start_key is not None:
+            oprot.writeFieldBegin('start_key', TType.STRING, 1)
+            oprot.writeBinary(self.start_key)
+            oprot.writeFieldEnd()
+        if self.end_key is not None:
+            oprot.writeFieldBegin('end_key', TType.STRING, 2)
+            oprot.writeBinary(self.end_key)
+            oprot.writeFieldEnd()
+        if self.start_token is not None:
+            oprot.writeFieldBegin('start_token', TType.STRING, 3)
+            oprot.writeString(self.start_token.encode('utf-8') if sys.version_info[0] == 2 else self.start_token)
+            oprot.writeFieldEnd()
+        if self.end_token is not None:
+            oprot.writeFieldBegin('end_token', TType.STRING, 4)
+            oprot.writeString(self.end_token.encode('utf-8') if sys.version_info[0] == 2 else self.end_token)
+            oprot.writeFieldEnd()
+        if self.count is not None:
+            oprot.writeFieldBegin('count', TType.I32, 5)
+            oprot.writeI32(self.count)
+            oprot.writeFieldEnd()
+        if self.row_filter is not None:
+            oprot.writeFieldBegin('row_filter', TType.LIST, 6)
+            oprot.writeListBegin(TType.STRUCT, len(self.row_filter))
+            for iter34 in self.row_filter:
+                iter34.write(oprot)
+            oprot.writeListEnd()
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        if self.count is None:
+            raise TProtocolException(message='Required field count is unset!')
+        return
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class KeySlice(object):
+    """
+    A KeySlice is a key followed by the data it maps to. A collection of KeySlice objects is returned by the get_range_slice operation.
+
+    @param key. a row key
+    @param columns. List of data represented by the key. Typically, the list is pared down to only the columns specified by
+                    a SlicePredicate.
+
+    Attributes:
+     - key
+     - columns
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.STRING, 'key', 'BINARY', None, ),  # 1
+        (2, TType.LIST, 'columns', (TType.STRUCT, (ColumnOrSuperColumn, ColumnOrSuperColumn.thrift_spec), False), None, ),  # 2
+    )
+
+    def __init__(self, key=None, columns=None,):
+        self.key = key
+        self.columns = columns
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.STRING:
+                    self.key = iprot.readBinary()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.LIST:
+                    self.columns = []
+                    (_etype38, _size35) = iprot.readListBegin()
+                    for _i39 in range(_size35):
+                        _elem40 = ColumnOrSuperColumn()
+                        _elem40.read(iprot)
+                        self.columns.append(_elem40)
+                    iprot.readListEnd()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('KeySlice')
+        if self.key is not None:
+            oprot.writeFieldBegin('key', TType.STRING, 1)
+            oprot.writeBinary(self.key)
+            oprot.writeFieldEnd()
+        if self.columns is not None:
+            oprot.writeFieldBegin('columns', TType.LIST, 2)
+            oprot.writeListBegin(TType.STRUCT, len(self.columns))
+            for iter41 in self.columns:
+                iter41.write(oprot)
+            oprot.writeListEnd()
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        if self.key is None:
+            raise TProtocolException(message='Required field key is unset!')
+        if self.columns is None:
+            raise TProtocolException(message='Required field columns is unset!')
+        return
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class KeyCount(object):
+    """
+    Attributes:
+     - key
+     - count
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.STRING, 'key', 'BINARY', None, ),  # 1
+        (2, TType.I32, 'count', None, None, ),  # 2
+    )
+
+    def __init__(self, key=None, count=None,):
+        self.key = key
+        self.count = count
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.STRING:
+                    self.key = iprot.readBinary()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.I32:
+                    self.count = iprot.readI32()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('KeyCount')
+        if self.key is not None:
+            oprot.writeFieldBegin('key', TType.STRING, 1)
+            oprot.writeBinary(self.key)
+            oprot.writeFieldEnd()
+        if self.count is not None:
+            oprot.writeFieldBegin('count', TType.I32, 2)
+            oprot.writeI32(self.count)
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        if self.key is None:
+            raise TProtocolException(message='Required field key is unset!')
+        if self.count is None:
+            raise TProtocolException(message='Required field count is unset!')
+        return
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class Deletion(object):
+    """
+    Note that the timestamp is optional only in the case of counter deletion.
+
+    Attributes:
+     - timestamp
+     - super_column
+     - predicate
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.I64, 'timestamp', None, None, ),  # 1
+        (2, TType.STRING, 'super_column', 'BINARY', None, ),  # 2
+        (3, TType.STRUCT, 'predicate', (SlicePredicate, SlicePredicate.thrift_spec), None, ),  # 3
+    )
+
+    def __init__(self, timestamp=None, super_column=None, predicate=None,):
+        self.timestamp = timestamp
+        self.super_column = super_column
+        self.predicate = predicate
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.I64:
+                    self.timestamp = iprot.readI64()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.STRING:
+                    self.super_column = iprot.readBinary()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 3:
+                if ftype == TType.STRUCT:
+                    self.predicate = SlicePredicate()
+                    self.predicate.read(iprot)
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('Deletion')
+        if self.timestamp is not None:
+            oprot.writeFieldBegin('timestamp', TType.I64, 1)
+            oprot.writeI64(self.timestamp)
+            oprot.writeFieldEnd()
+        if self.super_column is not None:
+            oprot.writeFieldBegin('super_column', TType.STRING, 2)
+            oprot.writeBinary(self.super_column)
+            oprot.writeFieldEnd()
+        if self.predicate is not None:
+            oprot.writeFieldBegin('predicate', TType.STRUCT, 3)
+            self.predicate.write(oprot)
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        return
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class Mutation(object):
+    """
+    A Mutation is either an insert (represented by filling column_or_supercolumn) or a deletion (represented by filling the deletion attribute).
+    @param column_or_supercolumn. An insert to a column or supercolumn (possibly counter column or supercolumn)
+    @param deletion. A deletion of a column or supercolumn
+
+    Attributes:
+     - column_or_supercolumn
+     - deletion
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.STRUCT, 'column_or_supercolumn', (ColumnOrSuperColumn, ColumnOrSuperColumn.thrift_spec), None, ),  # 1
+        (2, TType.STRUCT, 'deletion', (Deletion, Deletion.thrift_spec), None, ),  # 2
+    )
+
+    def __init__(self, column_or_supercolumn=None, deletion=None,):
+        self.column_or_supercolumn = column_or_supercolumn
+        self.deletion = deletion
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.STRUCT:
+                    self.column_or_supercolumn = ColumnOrSuperColumn()
+                    self.column_or_supercolumn.read(iprot)
+                else:
+                    iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.STRUCT:
+                    self.deletion = Deletion()
+                    self.deletion.read(iprot)
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('Mutation')
+        if self.column_or_supercolumn is not None:
+            oprot.writeFieldBegin('column_or_supercolumn', TType.STRUCT, 1)
+            self.column_or_supercolumn.write(oprot)
+            oprot.writeFieldEnd()
+        if self.deletion is not None:
+            oprot.writeFieldBegin('deletion', TType.STRUCT, 2)
+            self.deletion.write(oprot)
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        return
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class EndpointDetails(object):
+    """
+    Attributes:
+     - host
+     - datacenter
+     - rack
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.STRING, 'host', 'UTF8', None, ),  # 1
+        (2, TType.STRING, 'datacenter', 'UTF8', None, ),  # 2
+        (3, TType.STRING, 'rack', 'UTF8', None, ),  # 3
+    )
+
+    def __init__(self, host=None, datacenter=None, rack=None,):
+        self.host = host
+        self.datacenter = datacenter
+        self.rack = rack
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.STRING:
+                    self.host = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.STRING:
+                    self.datacenter = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 3:
+                if ftype == TType.STRING:
+                    self.rack = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('EndpointDetails')
+        if self.host is not None:
+            oprot.writeFieldBegin('host', TType.STRING, 1)
+            oprot.writeString(self.host.encode('utf-8') if sys.version_info[0] == 2 else self.host)
+            oprot.writeFieldEnd()
+        if self.datacenter is not None:
+            oprot.writeFieldBegin('datacenter', TType.STRING, 2)
+            oprot.writeString(self.datacenter.encode('utf-8') if sys.version_info[0] == 2 else self.datacenter)
+            oprot.writeFieldEnd()
+        if self.rack is not None:
+            oprot.writeFieldBegin('rack', TType.STRING, 3)
+            oprot.writeString(self.rack.encode('utf-8') if sys.version_info[0] == 2 else self.rack)
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        return
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class CASResult(object):
+    """
+    Attributes:
+     - success
+     - current_values
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.BOOL, 'success', None, None, ),  # 1
+        (2, TType.LIST, 'current_values', (TType.STRUCT, (Column, Column.thrift_spec), False), None, ),  # 2
+    )
+
+    def __init__(self, success=None, current_values=None,):
+        self.success = success
+        self.current_values = current_values
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.BOOL:
+                    self.success = iprot.readBool()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.LIST:
+                    self.current_values = []
+                    (_etype45, _size42) = iprot.readListBegin()
+                    for _i46 in range(_size42):
+                        _elem47 = Column()
+                        _elem47.read(iprot)
+                        self.current_values.append(_elem47)
+                    iprot.readListEnd()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('CASResult')
+        if self.success is not None:
+            oprot.writeFieldBegin('success', TType.BOOL, 1)
+            oprot.writeBool(self.success)
+            oprot.writeFieldEnd()
+        if self.current_values is not None:
+            oprot.writeFieldBegin('current_values', TType.LIST, 2)
+            oprot.writeListBegin(TType.STRUCT, len(self.current_values))
+            for iter48 in self.current_values:
+                iter48.write(oprot)
+            oprot.writeListEnd()
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        if self.success is None:
+            raise TProtocolException(message='Required field success is unset!')
+        return
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class TokenRange(object):
+    """
+    A TokenRange describes part of the Cassandra ring: it is a mapping from a range to
+    endpoints responsible for that range.
+    @param start_token The first token in the range
+    @param end_token The last token in the range
+    @param endpoints The endpoints responsible for the range (listed by their configured listen_address)
+    @param rpc_endpoints The endpoints responsible for the range (listed by their configured rpc_address)
+
+    Attributes:
+     - start_token
+     - end_token
+     - endpoints
+     - rpc_endpoints
+     - endpoint_details
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.STRING, 'start_token', 'UTF8', None, ),  # 1
+        (2, TType.STRING, 'end_token', 'UTF8', None, ),  # 2
+        (3, TType.LIST, 'endpoints', (TType.STRING, 'UTF8', False), None, ),  # 3
+        (4, TType.LIST, 'rpc_endpoints', (TType.STRING, 'UTF8', False), None, ),  # 4
+        (5, TType.LIST, 'endpoint_details', (TType.STRUCT, (EndpointDetails, EndpointDetails.thrift_spec), False), None, ),  # 5
+    )
+
+    def __init__(self, start_token=None, end_token=None, endpoints=None, rpc_endpoints=None, endpoint_details=None,):
+        self.start_token = start_token
+        self.end_token = end_token
+        self.endpoints = endpoints
+        self.rpc_endpoints = rpc_endpoints
+        self.endpoint_details = endpoint_details
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.STRING:
+                    self.start_token = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.STRING:
+                    self.end_token = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()

<TRUNCATED>
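
The structs above all follow one generated pattern: a thrift_spec tuple drives read()/write(), and validate() enforces required fields. A minimal round-trip sketch, assuming the stock pure-python Thrift runtime (its TBinaryProtocol exposes the _fast_encode/_fast_decode hooks as None, so the generated slow paths run) and the SliceRange struct generated earlier in this same module:

    from thrift.protocol import TBinaryProtocol
    from thrift.transport import TTransport

    # Fetch the first 100 columns of each row, in comparator order.
    predicate = SlicePredicate(
        slice_range=SliceRange(start=b'', finish=b'', reversed=False, count=100))

    # A Mutation is an insert XOR a deletion; this one deletes whatever the predicate names.
    mutation = Mutation(deletion=Deletion(timestamp=1, predicate=predicate))
    mutation.validate()

    # Keys are start-inclusive, so this covers rows b'a' through b'z'.
    key_range = KeyRange(start_key=b'a', end_key=b'z', count=100)

    # Round-trip through the binary protocol: write() and read() are symmetric.
    buf = TTransport.TMemoryBuffer()
    key_range.write(TBinaryProtocol.TBinaryProtocol(buf))
    decoded = KeyRange()
    decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue())))
    assert decoded == key_range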



[11/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/thrift_bindings/v22/constants.py
----------------------------------------------------------------------
diff --git a/thrift_bindings/v22/constants.py b/thrift_bindings/v22/constants.py
deleted file mode 100644
index 77c2c92..0000000
--- a/thrift_bindings/v22/constants.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Autogenerated by Thrift Compiler (0.9.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-#  options string: py
-#
-
-from thrift.Thrift import (TApplicationException, TException, TMessageType,
-                           TType)
-
-from ttypes import *
-
-VERSION = "20.1.0"




[22/36] cassandra-dtest git commit: Migrate dtests to use pytest and python3

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/paxos_test.py
----------------------------------------------------------------------
diff --git a/paxos_test.py b/paxos_test.py
new file mode 100644
index 0000000..736ca46
--- /dev/null
+++ b/paxos_test.py
@@ -0,0 +1,195 @@
+import time
+import pytest
+import logging
+
+from threading import Thread
+
+from cassandra import ConsistencyLevel, WriteTimeout
+from cassandra.query import SimpleStatement
+
+from tools.assertions import assert_unavailable
+from dtest import Tester, create_ks
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
+
+@since('2.0.6')
+class TestPaxos(Tester):
+
+    def prepare(self, ordered=False, create_keyspace=True, use_cache=False, nodes=1, rf=1):
+        cluster = self.cluster
+
+        if ordered:
+            cluster.set_partitioner("org.apache.cassandra.dht.ByteOrderedPartitioner")
+
+        if use_cache:
+            cluster.set_configuration_options(values={'row_cache_size_in_mb': 100})
+
+        cluster.populate(nodes).start()
+        node1 = cluster.nodelist()[0]
+        time.sleep(0.2)
+
+        session = self.patient_cql_connection(node1)
+        if create_keyspace:
+            create_ks(session, 'ks', rf)
+        return session
+
+    def test_replica_availability(self):
+        """
+        @jira_ticket CASSANDRA-8640
+
+        Regression test for a bug (CASSANDRA-8640) that required all nodes to
+        be available in order to run LWT queries, even if the query could
+        complete correctly with quorum nodes available.
+        """
+        session = self.prepare(nodes=3, rf=3)
+        session.execute("CREATE TABLE test (k int PRIMARY KEY, v int)")
+        session.execute("INSERT INTO test (k, v) VALUES (0, 0) IF NOT EXISTS")
+
+        self.cluster.nodelist()[2].stop()
+        session.execute("INSERT INTO test (k, v) VALUES (1, 1) IF NOT EXISTS")
+
+        self.cluster.nodelist()[1].stop()
+        assert_unavailable(session.execute, "INSERT INTO test (k, v) VALUES (2, 2) IF NOT EXISTS")
+
+        self.cluster.nodelist()[1].start(wait_for_binary_proto=True, wait_other_notice=True)
+        session.execute("INSERT INTO test (k, v) VALUES (3, 3) IF NOT EXISTS")
+
+        self.cluster.nodelist()[2].start(wait_for_binary_proto=True)
+        session.execute("INSERT INTO test (k, v) VALUES (4, 4) IF NOT EXISTS")
+
+    @pytest.mark.no_vnodes
+    def test_cluster_availability(self):
+        # Warning, a change in partitioner or a change in CCM token allocation
+        # may require the partition keys of these inserts to be changed.
+        # This must not use vnodes as it relies on assumed token values.
+
+        session = self.prepare(nodes=3)
+        session.execute("CREATE TABLE test (k int PRIMARY KEY, v int)")
+        session.execute("INSERT INTO test (k, v) VALUES (0, 0) IF NOT EXISTS")
+
+        self.cluster.nodelist()[2].stop()
+        session.execute("INSERT INTO test (k, v) VALUES (1, 1) IF NOT EXISTS")
+
+        self.cluster.nodelist()[1].stop()
+        session.execute("INSERT INTO test (k, v) VALUES (3, 2) IF NOT EXISTS")
+
+        self.cluster.nodelist()[1].start(wait_for_binary_proto=True)
+        session.execute("INSERT INTO test (k, v) VALUES (5, 5) IF NOT EXISTS")
+
+        self.cluster.nodelist()[2].start(wait_for_binary_proto=True)
+        session.execute("INSERT INTO test (k, v) VALUES (6, 6) IF NOT EXISTS")
+
+    def test_contention_multi_iterations(self):
+        pytest.skip("Hanging the build")
+        self._contention_test(8, 100)
+
+    # Warning, this test will require you to raise the open
+    # file limit on OSX. Use 'ulimit -n 1000'
+    def test_contention_many_threads(self):
+        self._contention_test(300, 1)
+
+    def _contention_test(self, threads, iterations):
+        """
+        Test threads repeatedly contending on the same row.
+        """
+
+        verbose = False
+
+        session = self.prepare(nodes=3)
+        session.execute("CREATE TABLE test (k int, v int static, id int, PRIMARY KEY (k, id))")
+        session.execute("INSERT INTO test(k, v) VALUES (0, 0)")
+
+        class Worker(Thread):
+
+            def __init__(self, wid, session, iterations, query):
+                Thread.__init__(self)
+                self.wid = wid
+                self.iterations = iterations
+                self.query = query
+                self.session = session
+                self.errors = 0
+                self.retries = 0
+
+            def run(self):
+                global worker_done
+                i = 0
+                prev = 0
+                while i < self.iterations:
+                    done = False
+                    while not done:
+                        try:
+                            res = self.session.execute(self.query, (prev + 1, prev, self.wid))
+                            if verbose:
+                                print("[%3d] CAS %3d -> %3d (res: %s)" % (self.wid, prev, prev + 1, str(res)))
+                            if res[0][0] is True:
+                                done = True
+                                prev = prev + 1
+                            else:
+                                self.retries = self.retries + 1
+                                # There are 2 conditions, so 2 reasons to fail: if we failed because the row with our
+                                # worker ID already exists, it means we timed out earlier but our update did go in,
+                                # so consider this a success
+                                prev = res[0][3]
+                                if res[0][2] is not None:
+                                    if verbose:
+                                        print("[%3d] Update was inserted on previous try (res = %s)" % (self.wid, str(res)))
+                                    done = True
+                        except WriteTimeout as e:
+                            if verbose:
+                                print("[%3d] TIMEOUT (%s)" % (self.wid, str(e)))
+                            # This means a timeout: just retry; if it happens that our update was indeed persisted,
+                            # we'll figure it out on the next run.
+                            self.retries = self.retries + 1
+                        except Exception as e:
+                            if verbose:
+                                print("[%3d] ERROR: %s" % (self.wid, str(e)))
+                            self.errors = self.errors + 1
+                            done = True
+                    i = i + 1
+                    # Clean up for next iteration
+                    while True:
+                        try:
+                            self.session.execute("DELETE FROM test WHERE k = 0 AND id = %d IF EXISTS" % self.wid)
+                            break
+                        except WriteTimeout as e:
+                            pass
+
+        nodes = self.cluster.nodelist()
+        workers = []
+
+        c = self.patient_cql_connection(nodes[0], keyspace='ks')
+        q = c.prepare("""
+                BEGIN BATCH
+                   UPDATE test SET v = ? WHERE k = 0 IF v = ?;
+                   INSERT INTO test (k, id) VALUES (0, ?) IF NOT EXISTS;
+                APPLY BATCH
+            """)
+
+        for n in range(0, threads):
+            workers.append(Worker(n, c, iterations, q))
+
+        start = time.time()
+
+        for w in workers:
+            w.start()
+
+        for w in workers:
+            w.join()
+
+        if verbose:
+            runtime = time.time() - start
+            print("runtime:", runtime)
+
+        query = SimpleStatement("SELECT v FROM test WHERE k = 0", consistency_level=ConsistencyLevel.ALL)
+        rows = session.execute(query)
+        value = rows[0][0]
+
+        errors = 0
+        retries = 0
+        for w in workers:
+            errors = errors + w.errors
+            retries = retries + w.retries
+
+        assert (value == threads * iterations) and (errors == 0), "value={}, errors={}, retries={}".format(value, errors, retries)
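
The Worker above leans on the shape of LWT result rows: column 0 is the [applied] flag, and when a CAS loses, the remaining columns echo the current values of the columns named in the conditions (which is why res[0][3] recovers v and res[0][2] detects the earlier INSERT). A minimal sketch of that contract for the single-statement case, assuming an open driver session like the test's:

    prev = 0  # this worker's view of v
    row = session.execute(
        "UPDATE test SET v = %s WHERE k = 0 IF v = %s", (prev + 1, prev)).one()
    if row[0]:           # the [applied] flag (also ResultSet.was_applied on recent drivers)
        prev += 1        # our CAS won this round
    else:
        prev = row[1]    # a losing CAS echoes the current value of v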

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/paxos_tests.py
----------------------------------------------------------------------
diff --git a/paxos_tests.py b/paxos_tests.py
deleted file mode 100644
index 6c0bd28..0000000
--- a/paxos_tests.py
+++ /dev/null
@@ -1,192 +0,0 @@
-# coding: utf-8
-
-import time
-from threading import Thread
-
-from cassandra import ConsistencyLevel, WriteTimeout
-from cassandra.query import SimpleStatement
-
-from tools.assertions import assert_unavailable
-from dtest import Tester, create_ks
-from tools.decorators import no_vnodes, since
-
-
-@since('2.0.6')
-class TestPaxos(Tester):
-
-    def prepare(self, ordered=False, create_keyspace=True, use_cache=False, nodes=1, rf=1):
-        cluster = self.cluster
-
-        if (ordered):
-            cluster.set_partitioner("org.apache.cassandra.dht.ByteOrderedPartitioner")
-
-        if (use_cache):
-            cluster.set_configuration_options(values={'row_cache_size_in_mb': 100})
-
-        cluster.populate(nodes).start()
-        node1 = cluster.nodelist()[0]
-        time.sleep(0.2)
-
-        session = self.patient_cql_connection(node1)
-        if create_keyspace:
-            create_ks(session, 'ks', rf)
-        return session
-
-    def replica_availability_test(self):
-        """
-        @jira_ticket CASSANDRA-8640
-
-        Regression test for a bug (CASSANDRA-8640) that required all nodes to
-        be available in order to run LWT queries, even if the query could
-        complete correctly with quorum nodes available.
-        """
-        session = self.prepare(nodes=3, rf=3)
-        session.execute("CREATE TABLE test (k int PRIMARY KEY, v int)")
-        session.execute("INSERT INTO test (k, v) VALUES (0, 0) IF NOT EXISTS")
-
-        self.cluster.nodelist()[2].stop()
-        session.execute("INSERT INTO test (k, v) VALUES (1, 1) IF NOT EXISTS")
-
-        self.cluster.nodelist()[1].stop()
-        assert_unavailable(session.execute, "INSERT INTO test (k, v) VALUES (2, 2) IF NOT EXISTS")
-
-        self.cluster.nodelist()[1].start(wait_for_binary_proto=True, wait_other_notice=True)
-        session.execute("INSERT INTO test (k, v) VALUES (3, 3) IF NOT EXISTS")
-
-        self.cluster.nodelist()[2].start(wait_for_binary_proto=True)
-        session.execute("INSERT INTO test (k, v) VALUES (4, 4) IF NOT EXISTS")
-
-    @no_vnodes()
-    def cluster_availability_test(self):
-        # Warning, a change in partitioner or a change in CCM token allocation
-        # may require the partition keys of these inserts to be changed.
-        # This must not use vnodes as it relies on assumed token values.
-
-        session = self.prepare(nodes=3)
-        session.execute("CREATE TABLE test (k int PRIMARY KEY, v int)")
-        session.execute("INSERT INTO test (k, v) VALUES (0, 0) IF NOT EXISTS")
-
-        self.cluster.nodelist()[2].stop()
-        session.execute("INSERT INTO test (k, v) VALUES (1, 1) IF NOT EXISTS")
-
-        self.cluster.nodelist()[1].stop()
-        session.execute("INSERT INTO test (k, v) VALUES (3, 2) IF NOT EXISTS")
-
-        self.cluster.nodelist()[1].start(wait_for_binary_proto=True)
-        session.execute("INSERT INTO test (k, v) VALUES (5, 5) IF NOT EXISTS")
-
-        self.cluster.nodelist()[2].start(wait_for_binary_proto=True)
-        session.execute("INSERT INTO test (k, v) VALUES (6, 6) IF NOT EXISTS")
-
-    def contention_test_multi_iterations(self):
-        self.skipTest("Hanging the build")
-        self._contention_test(8, 100)
-
-    # Warning, this test will require you to raise the open
-    # file limit on OSX. Use 'ulimit -n 1000'
-    def contention_test_many_threads(self):
-        self._contention_test(300, 1)
-
-    def _contention_test(self, threads, iterations):
-        """
-        Test threads repeatedly contending on the same row.
-        """
-
-        verbose = False
-
-        session = self.prepare(nodes=3)
-        session.execute("CREATE TABLE test (k int, v int static, id int, PRIMARY KEY (k, id))")
-        session.execute("INSERT INTO test(k, v) VALUES (0, 0)")
-
-        class Worker(Thread):
-
-            def __init__(self, wid, session, iterations, query):
-                Thread.__init__(self)
-                self.wid = wid
-                self.iterations = iterations
-                self.query = query
-                self.session = session
-                self.errors = 0
-                self.retries = 0
-
-            def run(self):
-                global worker_done
-                i = 0
-                prev = 0
-                while i < self.iterations:
-                    done = False
-                    while not done:
-                        try:
-                            res = self.session.execute(self.query, (prev + 1, prev, self.wid))
-                            if verbose:
-                                print "[%3d] CAS %3d -> %3d (res: %s)" % (self.wid, prev, prev + 1, str(res))
-                            if res[0][0] is True:
-                                done = True
-                                prev = prev + 1
-                            else:
-                                self.retries = self.retries + 1
-                                # There is 2 conditions, so 2 reasons to fail: if we failed because the row with our
-                                # worker ID already exists, it means we timeout earlier but our update did went in,
-                                # so do consider this as a success
-                                prev = res[0][3]
-                                if res[0][2] is not None:
-                                    if verbose:
-                                        print "[%3d] Update was inserted on previous try (res = %s)" % (self.wid, str(res))
-                                    done = True
-                        except WriteTimeout as e:
-                            if verbose:
-                                print "[%3d] TIMEOUT (%s)" % (self.wid, str(e))
-                            # This means a timeout: just retry, if it happens that our update was indeed persisted,
-                            # we'll figure it out on the next run.
-                            self.retries = self.retries + 1
-                        except Exception as e:
-                            if verbose:
-                                print "[%3d] ERROR: %s" % (self.wid, str(e))
-                            self.errors = self.errors + 1
-                            done = True
-                    i = i + 1
-                    # Clean up for next iteration
-                    while True:
-                        try:
-                            self.session.execute("DELETE FROM test WHERE k = 0 AND id = %d IF EXISTS" % self.wid)
-                            break
-                        except WriteTimeout as e:
-                            pass
-
-        nodes = self.cluster.nodelist()
-        workers = []
-
-        c = self.patient_cql_connection(nodes[0], keyspace='ks')
-        q = c.prepare("""
-                BEGIN BATCH
-                   UPDATE test SET v = ? WHERE k = 0 IF v = ?;
-                   INSERT INTO test (k, id) VALUES (0, ?) IF NOT EXISTS;
-                APPLY BATCH
-            """)
-
-        for n in range(0, threads):
-            workers.append(Worker(n, c, iterations, q))
-
-        start = time.time()
-
-        for w in workers:
-            w.start()
-
-        for w in workers:
-            w.join()
-
-        if verbose:
-            runtime = time.time() - start
-            print "runtime:", runtime
-
-        query = SimpleStatement("SELECT v FROM test WHERE k = 0", consistency_level=ConsistencyLevel.ALL)
-        rows = session.execute(query)
-        value = rows[0][0]
-
-        errors = 0
-        retries = 0
-        for w in workers:
-            errors = errors + w.errors
-            retries = retries + w.retries
-
-        self.assertTrue((value == threads * iterations) and (errors == 0), "value={}, errors={}, retries={}".format(value, errors, retries))

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/pending_range_test.py
----------------------------------------------------------------------
diff --git a/pending_range_test.py b/pending_range_test.py
index efa56f4..55d810b 100644
--- a/pending_range_test.py
+++ b/pending_range_test.py
@@ -1,21 +1,25 @@
+import logging
+import pytest
+
 from cassandra.query import SimpleStatement
-from nose.plugins.attrib import attr
 
-from dtest import TRACE, Tester, debug, create_ks
-from tools.decorators import no_vnodes
+from dtest import Tester, create_ks
+from plugins.assert_tools import assert_regexp_matches
+
+logger = logging.getLogger(__name__)
 
 
-@no_vnodes()
+@pytest.mark.no_vnodes
 class TestPendingRangeMovements(Tester):
 
-    @attr('resource-intensive')
-    def pending_range_test(self):
+    @pytest.mark.resource_intensive
+    def test_pending_range(self):
         """
         @jira_ticket CASSANDRA-10887
         """
         cluster = self.cluster
         # If we are on 2.1, we need to set the log level to debug or higher, as debug.log does not exist.
-        if cluster.version() < '2.2' and not TRACE:
+        if cluster.version() < '2.2':
             cluster.set_log_level('DEBUG')
 
         # Create 5 node cluster
@@ -35,7 +39,7 @@ class TestPendingRangeMovements(Tester):
         lwt_query = SimpleStatement("UPDATE users SET email = 'janedoe@abc.com' WHERE login = 'jdoe3' IF email = 'jdoe@abc.com'")
 
         # Show we can execute LWT no problem
-        for i in xrange(1000):
+        for i in range(1000):
             session.execute(lwt_query)
 
         token = '-634023222112864484'
@@ -61,9 +65,9 @@ class TestPendingRangeMovements(Tester):
 
         # Verify other nodes believe this is Down/Moving
         out, _, _ = node2.nodetool('ring')
-        debug("Nodetool Ring output: {}".format(out))
-        self.assertRegexpMatches(out, '127\.0\.0\.1.*?Down.*?Moving')
+        logger.debug("Nodetool Ring output: {}".format(out))
+        assert_regexp_matches(out, r'127\.0\.0\.1.*?Down.*?Moving')
 
         # Check we can still execute LWT
-        for i in xrange(1000):
+        for i in range(1000):
             session.execute(lwt_query)

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/plugins/assert_tools.py
----------------------------------------------------------------------
diff --git a/plugins/assert_tools.py b/plugins/assert_tools.py
new file mode 100644
index 0000000..9f796ed
--- /dev/null
+++ b/plugins/assert_tools.py
@@ -0,0 +1,138 @@
+"""
+Copyright 2016 Oliver Schoenborn. BSD 3-Clause license (see __license__ at bottom of this file for details).
+
+This module is part of the nose2pytest distribution.
+
+This module's assert_ functions provide drop-in replacements for nose.tools.assert_ functions (many of which are
+pep-8-ized extractions from Python's unittest.case.TestCase methods). As such, it can be imported in a test
+suite run by py.test, to replace the nose imports with functions that rely on py.test's assertion
+introspection for error reporting.  When combined with running nose2pytest.py on your test suite, this
+module may be sufficient to decrease your test suite's third-party dependencies by 1.
+"""
+
+import unittest
+
+
+__all__ = [
+    'assert_almost_equal',
+    'assert_not_almost_equal',
+    'assert_dict_contains_subset',
+
+    'assert_raises_regex',
+    'assert_raises_regexp',
+    'assert_regexp_matches',
+    'assert_warns_regex',
+]
+
+
+def assert_almost_equal(a, b, places=7, msg=None):
+    """
+    Fail if the two objects are unequal as determined by their
+    difference rounded to the given number of decimal places
+    and comparing to zero.
+
+    Note that decimal places (from zero) are usually not the same
+    as significant digits (measured from the most significant digit).
+
+    See the builtin round() function for places parameter.
+    """
+    if msg is None:
+        assert round(abs(b - a), places) == 0
+    else:
+        assert round(abs(b - a), places) == 0, msg
+
+
+def assert_not_almost_equal(a, b, places=7, msg=None):
+    """
+    Fail if the two objects are equal as determined by their
+    difference rounded to the given number of decimal places
+    and comparing to zero.
+
+    Note that decimal places (from zero) are usually not the same
+    as significant digits (measured from the most signficant digit).
+
+    See the builtin round() function for places parameter.
+    """
+    if msg is None:
+        assert round(abs(b - a), places) != 0
+    else:
+        assert round(abs(b - a), places) != 0, msg
+
+
+def assert_dict_contains_subset(subset, dictionary, msg=None):
+    """
+    Checks whether dictionary is a superset of subset. If not, the assertion message will have useful details,
+    unless msg is given, then msg is output.
+    """
+    dictionary = dictionary
+    missing_keys = sorted(list(set(subset.keys()) - set(dictionary.keys())))
+    mismatch_vals = {k: (subset[k], dictionary[k]) for k in subset if k in dictionary and subset[k] != dictionary[k]}
+    if msg is None:
+        assert missing_keys == [], 'Missing keys = {}'.format(missing_keys)
+        assert mismatch_vals == {}, 'Mismatched values (s, d) = {}'.format(mismatch_vals)
+    else:
+        assert missing_keys == [], msg
+        assert mismatch_vals == {}, msg
+
+
+# make other unittest.TestCase methods available as-is as functions; trick taken from Nose
+
+class _Dummy(unittest.TestCase):
+    def do_nothing(self):
+        pass
+
+_t = _Dummy('do_nothing')
+
+assert_raises_regex = _t.assertRaisesRegex
+assert_raises_regexp = _t.assertRaisesRegexp
+assert_regexp_matches = _t.assertRegexpMatches
+assert_warns_regex = _t.assertWarnsRegex
+
+del _Dummy
+del _t
+
+
+# py.test integration: add all assert_ function to the pytest package namespace
+
+# Use similar trick as Nose to bring in bound methods from unittest.TestCase as free functions:
+
+def pytest_namespace() -> {str: callable}:
+    namespace = {}
+    for name, obj in globals().items():
+        if name.startswith('assert_'):
+            namespace[name] = obj
+
+    return namespace
+
+
+# licensing
+
+__license__ = """
+    Copyright (c) 2016, Oliver Schoenborn
+    All rights reserved.
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice, this
+      list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    * Neither the name of nose2pytest nor the names of its
+      contributors may be used to endorse or promote products derived from
+      this software without specific prior written permission.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/plugins/dtestcollect.py
----------------------------------------------------------------------
diff --git a/plugins/dtestcollect.py b/plugins/dtestcollect.py
deleted file mode 100644
index 6b2dac4..0000000
--- a/plugins/dtestcollect.py
+++ /dev/null
@@ -1,92 +0,0 @@
-import os
-from nose.plugins.base import Plugin
-from nose.case import Test
-import logging
-import unittest
-
-log = logging.getLogger(__name__)
-
-
-class DTestCollect(Plugin):
-    """
-    Collect and output test names only, don't run any tests.
-    """
-    name = 'dtest_collect'
-    enableOpt = 'dtest_collect_only'
-
-    def options(self, parser, env):
-        """Register commandline options.
-        """
-        parser.add_option('--dtest-collect-only',
-                          action='store_true',
-                          dest=self.enableOpt,
-                          default=env.get('DTEST_NOSE_COLLECT_ONLY'),
-                          help="Enable collect-only: %s [COLLECT_ONLY]" %
-                          (self.help()))
-
-    def prepareTestLoader(self, loader):
-        """Install collect-only suite class in TestLoader.
-        """
-        # Disable context awareness
-        log.debug("Preparing test loader")
-        loader.suiteClass = TestSuiteFactory(self.conf)
-
-    def prepareTestCase(self, test):
-        """Replace actual test with dummy that always passes.
-        """
-        # Return something that always passes
-        log.debug("Preparing test case %s", test)
-        if not isinstance(test, Test):
-            return
-
-        def run(result):
-            # We need to make these plugin calls because there won't be
-            # a result proxy, due to using a stripped-down test suite
-            self.conf.plugins.startTest(test)
-            result.startTest(test)
-            self.conf.plugins.addSuccess(test)
-            result.addSuccess(test)
-            self.conf.plugins.stopTest(test)
-            result.stopTest(test)
-        return run
-
-    def describeTest(self, test):
-        tag = os.getenv('TEST_TAG', '')
-        if tag == '':
-            tag = test.test._testMethodName
-        else:
-            tag = test.test._testMethodName + "-" + tag
-        retval = "%s:%s.%s" % (test.test.__module__, test.test.__class__.__name__, tag)
-        return retval
-
-
-class TestSuiteFactory:
-    """
-    Factory for producing configured test suites.
-    """
-    def __init__(self, conf):
-        self.conf = conf
-
-    def __call__(self, tests=(), **kw):
-        return TestSuite(tests, conf=self.conf)
-
-
-class TestSuite(unittest.TestSuite):
-    """
-    Basic test suite that bypasses most proxy and plugin calls, but does
-    wrap tests in a nose.case.Test so prepareTestCase will be called.
-    """
-    def __init__(self, tests=(), conf=None):
-        self.conf = conf
-        # Exec lazy suites: makes discovery depth-first
-        if callable(tests):
-            tests = tests()
-        log.debug("TestSuite(%r)", tests)
-        unittest.TestSuite.__init__(self, tests)
-
-    def addTest(self, test):
-        log.debug("Add test %s", test)
-        if isinstance(test, unittest.TestSuite):
-            self._tests.append(test)
-        else:
-            self._tests.append(Test(test, config=self.conf))
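
Stock pytest covers what this nose collect-only plugin provided; a sketch
using only built-in flags:

    import pytest

    # list the collected test ids without running them, roughly what
    # --dtest-collect-only did under nose
    pytest.main(['--collect-only', '-q'])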

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/plugins/dtestconfig.py
----------------------------------------------------------------------
diff --git a/plugins/dtestconfig.py b/plugins/dtestconfig.py
deleted file mode 100644
index 5363d9a..0000000
--- a/plugins/dtestconfig.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from collections import namedtuple
-
-from nose import plugins
-
-# A class that defines the attributes that have to be defined for configuring
-# a dtest run. namedtuple does what we want -- it's immutable and requires
-# all the attributes to be passed in to be instantiated.
-GlobalConfigObject = namedtuple('GlobalConfigObject', [
-    'vnodes',  # disable or enable vnodes
-])
-
-_CONFIG = None
-
-
-class DtestConfigPlugin(plugins.Plugin):
-    """
-    Pass in configuration options for the dtests.
-    """
-    enabled = True  # if this plugin is loaded at all, we're using it
-    name = 'dtest_config'
-
-    def __init__(self, config=None):
-        """
-        Instantiate this plugin with a GlobalConfigObject or, by default, None.
-        Then, set  the global _CONFIG constant with the value of the plugin.
-
-        This is a little weird, yes, but nose seems to generally be built
-        around the idea that a given plugin will be instantiated only once, so
-        this provides a way for test framework code to grab the value off this
-        module. We want that, since the plugin itself isn't available to test
-        code.
-
-        @param config an object meeting the GlobalConfigObject spec that will
-                      be used as configuration for a dtest run.
-        """
-        self.CONFIG = config
-
-        global _CONFIG
-        _CONFIG = self.CONFIG
-
-    def configure(self, options, conf):
-        pass
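
Under pytest, per-run configuration like the vnodes flag above is typically
passed through conftest.py hooks and fixtures rather than a plugin class; a
sketch (option and fixture names are illustrative, not taken from this commit):

    import pytest

    def pytest_addoption(parser):
        parser.addoption('--use-vnodes', action='store_true', default=False,
                         help='enable vnodes for the dtest run')

    @pytest.fixture
    def dtest_config(request):
        return {'vnodes': request.config.getoption('--use-vnodes')}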

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/plugins/dtesttag.py
----------------------------------------------------------------------
diff --git a/plugins/dtesttag.py b/plugins/dtesttag.py
deleted file mode 100644
index e1ebb74..0000000
--- a/plugins/dtesttag.py
+++ /dev/null
@@ -1,45 +0,0 @@
-from nose import plugins
-import os
-import inspect
-
-
-class DTestTag(plugins.Plugin):
-    enabled = True  # if this plugin is loaded at all, we're using it
-    name = 'dtest_tag'
-
-    def __init__(self):
-        pass
-
-    def configure(self, options, conf):
-        pass
-
-    def nice_classname(self, obj):
-        """Returns a nice name for class object or class instance.
-
-            >>> nice_classname(Exception()) # doctest: +ELLIPSIS
-            '...Exception'
-            >>> nice_classname(Exception) # doctest: +ELLIPSIS
-            '...Exception'
-
-        """
-        if inspect.isclass(obj):
-            cls_name = obj.__name__
-        else:
-            cls_name = obj.__class__.__name__
-        mod = inspect.getmodule(obj)
-        if mod:
-            name = mod.__name__
-            # jython
-            if name.startswith('org.python.core.'):
-                name = name[len('org.python.core.'):]
-            return "%s.%s" % (name, cls_name)
-        else:
-            return cls_name
-
-    def describeTest(self, test):
-        tag = os.getenv('TEST_TAG', '')
-        if tag == '':
-            tag = test.test._testMethodName
-        else:
-            tag = test.test._testMethodName + "-" + tag
-        return "%s (%s)" % (tag, self.nice_classname(test.test))

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/plugins/dtestxunit.py
----------------------------------------------------------------------
diff --git a/plugins/dtestxunit.py b/plugins/dtestxunit.py
deleted file mode 100644
index c996d53..0000000
--- a/plugins/dtestxunit.py
+++ /dev/null
@@ -1,348 +0,0 @@
-"""This plugin provides test results in the standard XUnit XML format.
-
-It's designed for the `Jenkins`_ (previously Hudson) continuous build
-system, but will probably work for anything else that understands an
-XUnit-formatted XML representation of test results.
-
-Add this shell command to your builder ::
-
-    nosetests --with-dtestxunit
-
-And by default a file named nosetests.xml will be written to the
-working directory.
-
-In a Jenkins builder, tick the box named "Publish JUnit test result report"
-under the Post-build Actions and enter this value for Test report XMLs::
-
-    **/nosetests.xml
-
-If you need to change the name or location of the file, you can set the
-``--dtestxunit-file`` option.
-
-If you need to change the name of the test suite, you can set the
-``--dtestxunit-testsuite-name`` option.
-
-Here is an abbreviated version of what an XML test report might look like::
-
-    <?xml version="1.0" encoding="UTF-8"?>
-    <testsuite name="nosetests" tests="1" errors="1" failures="0" skip="0">
-        <testcase classname="path_to_test_suite.TestSomething"
-                  name="test_it" time="0">
-            <error type="exceptions.TypeError" message="oops, wrong type">
-            Traceback (most recent call last):
-            ...
-            TypeError: oops, wrong type
-            </error>
-        </testcase>
-    </testsuite>
-
-.. _Jenkins: http://jenkins-ci.org/
-
-"""
-import codecs
-import os
-import sys
-import re
-import inspect
-from StringIO import StringIO
-from time import time
-from xml.sax import saxutils
-
-from nose.plugins.base import Plugin
-from nose.exc import SkipTest
-from nose.pyversion import force_unicode, format_exception
-
-# Invalid XML characters, control characters 0-31 sans \t, \n and \r
-CONTROL_CHARACTERS = re.compile(r"[\000-\010\013\014\016-\037]")
-
-TEST_ID = re.compile(r'^(.*?)(\(.*\))$')
-
-
-def xml_safe(value):
-    """Replaces invalid XML characters with '?'."""
-    return CONTROL_CHARACTERS.sub('?', value)
-
-
-def escape_cdata(cdata):
-    """Escape a string for an XML CDATA section."""
-    return xml_safe(cdata).replace(']]>', ']]>]]&gt;<![CDATA[')
-
-
-def id_split(idval):
-    m = TEST_ID.match(idval)
-    retval = []
-    if m:
-        name, fargs = m.groups()
-        head, tail = name.rsplit(".", 1)
-        retval = [head, tail + fargs]
-    else:
-        retval = idval.rsplit(".", 1)
-    tag = os.getenv('TEST_TAG', '')
-    if tag != '':
-        retval[-1] = retval[-1] + "-" + tag
-    return retval
-
-
-def nice_classname(obj):
-    """Returns a nice name for class object or class instance.
-
-        >>> nice_classname(Exception()) # doctest: +ELLIPSIS
-        '...Exception'
-        >>> nice_classname(Exception) # doctest: +ELLIPSIS
-        '...Exception'
-
-    """
-    if inspect.isclass(obj):
-        cls_name = obj.__name__
-    else:
-        cls_name = obj.__class__.__name__
-    mod = inspect.getmodule(obj)
-    if mod:
-        name = mod.__name__
-        # jython
-        if name.startswith('org.python.core.'):
-            name = name[len('org.python.core.'):]
-        return "%s.%s" % (name, cls_name)
-    else:
-        return cls_name
-
-
-def exc_message(exc_info):
-    """Return the exception's message."""
-    exc = exc_info[1]
-    if exc is None:
-        # str exception
-        result = exc_info[0]
-    else:
-        try:
-            result = str(exc)
-        except UnicodeEncodeError:
-            try:
-                result = unicode(exc)
-            except UnicodeError:
-                # Fallback to args as neither str nor
-                # unicode(Exception(u'\xe6')) work in Python < 2.6
-                result = exc.args[0]
-    result = force_unicode(result, 'UTF-8')
-    return xml_safe(result)
-
-
-class Tee(object):
-    def __init__(self, encoding, *args):
-        self._encoding = encoding
-        self._streams = args
-
-    def write(self, data):
-        data = force_unicode(data, self._encoding)
-        for s in self._streams:
-            s.write(data)
-
-    def writelines(self, lines):
-        for line in lines:
-            self.write(line)
-
-    def flush(self):
-        for s in self._streams:
-            s.flush()
-
-    def isatty(self):
-        return False
-
-
-class DTestXunit(Plugin):
-    """This plugin provides test results in the standard XUnit XML format."""
-    name = 'dtestxunit'
-    score = 1500
-    encoding = 'UTF-8'
-    error_report_file = None
-
-    def __init__(self):
-        super(DTestXunit, self).__init__()
-        self._capture_stack = []
-        self._currentStdout = None
-        self._currentStderr = None
-
-    def _timeTaken(self):
-        if hasattr(self, '_timer'):
-            taken = time() - self._timer
-        else:
-            # test died before it ran (probably error in setup())
-            # or success/failure added before test started probably
-            # due to custom TestResult munging
-            taken = 0.0
-        return taken
-
-    def _quoteattr(self, attr):
-        """Escape an XML attribute. Value can be unicode."""
-        attr = xml_safe(attr)
-        return saxutils.quoteattr(attr)
-
-    def options(self, parser, env):
-        """Sets additional command line options."""
-        Plugin.options(self, parser, env)
-        parser.add_option(
-            '--dtestxunit-file', action='store',
-            dest='dtestxunit_file', metavar="FILE",
-            default=env.get('NOSE_XUNIT_FILE', 'nosetests.xml'),
-            help=("Path to xml file to store the xunit report in. "
-                  "Default is nosetests.xml in the working directory "
-                  "[NOSE_XUNIT_FILE]"))
-
-        parser.add_option(
-            '--dtestxunit-testsuite-name', action='store',
-            dest='dtestxunit_testsuite_name', metavar="PACKAGE",
-            default=env.get('NOSE_XUNIT_TESTSUITE_NAME', 'nosetests'),
-            help=("Name of the testsuite in the xunit xml, generated by plugin. "
-                  "Default test suite name is nosetests."))
-
-    def configure(self, options, config):
-        """Configures the xunit plugin."""
-        Plugin.configure(self, options, config)
-        self.config = config
-        if self.enabled:
-            self.stats = {'errors': 0,
-                          'failures': 0,
-                          'passes': 0,
-                          'skipped': 0
-                          }
-            self.errorlist = []
-            self.error_report_file_name = os.path.realpath(options.dtestxunit_file)
-            self.xunit_testsuite_name = options.dtestxunit_testsuite_name
-
-    def report(self, stream):
-        """Writes an Xunit-formatted XML file
-
-        The file includes a report of test errors and failures.
-
-        """
-        self.error_report_file = codecs.open(self.error_report_file_name, 'w',
-                                             self.encoding, 'replace')
-        self.stats['encoding'] = self.encoding
-        self.stats['testsuite_name'] = self.xunit_testsuite_name
-        self.stats['total'] = (self.stats['errors'] + self.stats['failures'] +
-                               self.stats['passes'] + self.stats['skipped'])
-        self.error_report_file.write(
-            u'<?xml version="1.0" encoding="%(encoding)s"?>'
-            u'<testsuite name="%(testsuite_name)s" tests="%(total)d" '
-            u'errors="%(errors)d" failures="%(failures)d" '
-            u'skip="%(skipped)d">' % self.stats)
-        self.error_report_file.write(u''.join([force_unicode(e, self.encoding)
-                                               for e in self.errorlist]))
-        self.error_report_file.write(u'</testsuite>')
-        self.error_report_file.close()
-        if self.config.verbosity > 1:
-            stream.writeln("-" * 70)
-            stream.writeln("XML: %s" % self.error_report_file.name)
-
-    def _startCapture(self):
-        self._capture_stack.append((sys.stdout, sys.stderr))
-        self._currentStdout = StringIO()
-        self._currentStderr = StringIO()
-        sys.stdout = Tee(self.encoding, self._currentStdout, sys.stdout)
-        sys.stderr = Tee(self.encoding, self._currentStderr, sys.stderr)
-
-    def startContext(self, context):
-        self._startCapture()
-
-    def stopContext(self, context):
-        self._endCapture()
-
-    def beforeTest(self, test):
-        """Initializes a timer before starting a test."""
-        self._timer = time()
-        self._startCapture()
-
-    def _endCapture(self):
-        if self._capture_stack:
-            sys.stdout, sys.stderr = self._capture_stack.pop()
-
-    def afterTest(self, test):
-        self._endCapture()
-        self._currentStdout = None
-        self._currentStderr = None
-
-    def finalize(self, test):
-        while self._capture_stack:
-            self._endCapture()
-
-    def _getCapturedStdout(self):
-        if self._currentStdout:
-            value = self._currentStdout.getvalue()
-            if value:
-                return '<system-out><![CDATA[%s]]></system-out>' % escape_cdata(value)
-        return ''
-
-    def _getCapturedStderr(self):
-        if self._currentStderr:
-            value = self._currentStderr.getvalue()
-            if value:
-                return '<system-err><![CDATA[%s]]></system-err>' % escape_cdata(value)
-        return ''
-
-    def addError(self, test, err, capt=None):
-        """Add error output to Xunit report.
-        """
-        taken = self._timeTaken()
-
-        if issubclass(err[0], SkipTest):
-            type = 'skipped'
-            self.stats['skipped'] += 1
-        else:
-            type = 'error'
-            self.stats['errors'] += 1
-
-        tb = format_exception(err, self.encoding)
-        id = test.id()
-
-        self.errorlist.append(
-            u'<testcase classname=%(cls)s name=%(name)s time="%(taken).3f">'
-            u'<%(type)s type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>'
-            u'</%(type)s>%(systemout)s%(systemerr)s</testcase>' %
-            {'cls': self._quoteattr(id_split(id)[0]),
-             'name': self._quoteattr(id_split(id)[-1]),
-             'taken': taken,
-             'type': type,
-             'errtype': self._quoteattr(nice_classname(err[0])),
-             'message': self._quoteattr(exc_message(err)),
-             'tb': escape_cdata(tb),
-             'systemout': self._getCapturedStdout(),
-             'systemerr': self._getCapturedStderr(),
-             })
-
-    def addFailure(self, test, err, capt=None, tb_info=None):
-        """Add failure output to Xunit report.
-        """
-        taken = self._timeTaken()
-        tb = format_exception(err, self.encoding)
-        self.stats['failures'] += 1
-        id = test.id()
-
-        self.errorlist.append(
-            u'<testcase classname=%(cls)s name=%(name)s time="%(taken).3f">'
-            u'<failure type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>'
-            u'</failure>%(systemout)s%(systemerr)s</testcase>' %
-            {'cls': self._quoteattr(id_split(id)[0]),
-             'name': self._quoteattr(id_split(id)[-1]),
-             'taken': taken,
-             'errtype': self._quoteattr(nice_classname(err[0])),
-             'message': self._quoteattr(exc_message(err)),
-             'tb': escape_cdata(tb),
-             'systemout': self._getCapturedStdout(),
-             'systemerr': self._getCapturedStderr(),
-             })
-
-    def addSuccess(self, test, capt=None):
-        """Add success output to Xunit report.
-        """
-        taken = self._timeTaken()
-        self.stats['passes'] += 1
-        id = test.id()
-        self.errorlist.append(
-            '<testcase classname=%(cls)s name=%(name)s '
-            'time="%(taken).3f">%(systemout)s%(systemerr)s</testcase>' %
-            {'cls': self._quoteattr(id_split(id)[0]),
-             'name': self._quoteattr(id_split(id)[-1]),
-             'taken': taken,
-             'systemout': self._getCapturedStdout(),
-             'systemerr': self._getCapturedStderr(),
-             })
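
pytest ships equivalent JUnit/XUnit XML reporting natively, which is why this
plugin can be deleted; a sketch of the replacement invocation (the suite name
is picked up from junit_suite_name in the new pytest.ini later in this patch):

    import pytest

    # writes a JUnit-style XML report, as --dtestxunit-file did under nose
    pytest.main(['--junit-xml=nosetests.xml'])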

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/prepared_statements_test.py
----------------------------------------------------------------------
diff --git a/prepared_statements_test.py b/prepared_statements_test.py
index 35ca5aa..72ac9dd 100644
--- a/prepared_statements_test.py
+++ b/prepared_statements_test.py
@@ -1,7 +1,11 @@
+import logging
+
 from cassandra import InvalidRequest
 
 from dtest import Tester
 
+logger = logging.getLogger(__name__)
+
 KEYSPACE = "foo"
 
 
@@ -10,13 +14,12 @@ class TestPreparedStatements(Tester):
     Tests for pushed native protocol notification from Cassandra.
     """
 
-    def dropped_index_test(self):
+    def test_dropped_index(self):
         """
         Prepared statements using dropped indexes should be handled correctly
         """
-
         self.cluster.populate(1).start()
-        node = self.cluster.nodes.values()[0]
+        node = list(self.cluster.nodes.values())[0]
 
         session = self.patient_cql_connection(node)
         session.execute("""
@@ -33,14 +36,14 @@ class TestPreparedStatements(Tester):
             session.execute(insert_statement, (i, 0))
 
         query_statement = session.prepare("SELECT * FROM mytable WHERE b=?")
-        print "Number of matching rows:", len(list(session.execute(query_statement, (0,))))
+        print("Number of matching rows:", len(list(session.execute(query_statement, (0,)))))
 
         session.execute("DROP INDEX bindex")
 
         try:
-            print "Executing prepared statement with dropped index..."
+            print("Executing prepared statement with dropped index...")
             session.execute(query_statement, (0,))
         except InvalidRequest as ir:
-            print ir
+            print(ir)
         except Exception:
             raise

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/pushed_notifications_test.py
----------------------------------------------------------------------
diff --git a/pushed_notifications_test.py b/pushed_notifications_test.py
index a8dcf81..9b888de 100644
--- a/pushed_notifications_test.py
+++ b/pushed_notifications_test.py
@@ -1,4 +1,7 @@
 import time
+import pytest
+import logging
+
 from datetime import datetime
 from distutils.version import LooseVersion
 from threading import Event
@@ -7,10 +10,11 @@ from cassandra import ConsistencyLevel as CL
 from cassandra import ReadFailure
 from cassandra.query import SimpleStatement
 from ccmlib.node import Node, TimeoutError
-from nose.tools import timed
 
-from dtest import Tester, debug, get_ip_from_node, create_ks
-from tools.decorators import no_vnodes, since
+from dtest import Tester, get_ip_from_node, create_ks
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 class NotificationWaiter(object):
@@ -48,7 +52,7 @@ class NotificationWaiter(object):
         """
         Called when a notification is pushed from Cassandra.
         """
-        debug("Got {} from {} at {}".format(notification, self.address, datetime.now()))
+        logger.debug("Got {} from {} at {}".format(notification, self.address, datetime.now()))
 
         if self.keyspace and notification['keyspace'] and self.keyspace != notification['keyspace']:
             return  # we are not interested in this schema change
@@ -73,7 +77,7 @@ class NotificationWaiter(object):
         return self.notifications
 
     def clear_notifications(self):
-        debug("Clearing notifications...")
+        logger.debug("Clearing notifications...")
         self.notifications = []
         self.event.clear()
 
@@ -83,8 +87,8 @@ class TestPushedNotifications(Tester):
     Tests for pushed native protocol notification from Cassandra.
     """
 
-    @no_vnodes()
-    def move_single_node_test(self):
+    @pytest.mark.no_vnodes
+    def test_move_single_node(self):
         """
         @jira_ticket CASSANDRA-8516
         Moving a token should result in MOVED_NODE notifications.
@@ -92,30 +96,30 @@ class TestPushedNotifications(Tester):
         self.cluster.populate(3).start(wait_for_binary_proto=True, wait_other_notice=True)
 
         waiters = [NotificationWaiter(self, node, ["TOPOLOGY_CHANGE"])
-                   for node in self.cluster.nodes.values()]
+                   for node in list(self.cluster.nodes.values())]
 
         # The first node sends NEW_NODE for the other 2 nodes during startup, in case they are
         # late due to network delays let's block a bit longer
-        debug("Waiting for unwanted notifications....")
+        logger.debug("Waiting for unwanted notifications....")
         waiters[0].wait_for_notifications(timeout=30, num_notifications=2)
         waiters[0].clear_notifications()
 
-        debug("Issuing move command....")
-        node1 = self.cluster.nodes.values()[0]
+        logger.debug("Issuing move command....")
+        node1 = list(self.cluster.nodes.values())[0]
         node1.move("123")
 
         for waiter in waiters:
-            debug("Waiting for notification from {}".format(waiter.address,))
+            logger.debug("Waiting for notification from {}".format(waiter.address,))
             notifications = waiter.wait_for_notifications(60.0)
-            self.assertEquals(1, len(notifications), notifications)
+            assert 1 == len(notifications), notifications
             notification = notifications[0]
             change_type = notification["change_type"]
             address, port = notification["address"]
-            self.assertEquals("MOVED_NODE", change_type)
-            self.assertEquals(get_ip_from_node(node1), address)
+            assert "MOVED_NODE" == change_type
+            assert get_ip_from_node(node1) == address
 
-    @no_vnodes()
-    def move_single_node_localhost_test(self):
+    @pytest.mark.no_vnodes
+    def test_move_single_node_localhost(self):
         """
         @jira_ticket  CASSANDRA-10052
         Test that we don't get NODE_MOVED notifications from nodes other than the local one,
@@ -132,29 +136,28 @@ class TestPushedNotifications(Tester):
         cluster.start(wait_for_binary_proto=True, wait_other_notice=True)
 
         waiters = [NotificationWaiter(self, node, ["TOPOLOGY_CHANGE"])
-                   for node in self.cluster.nodes.values()]
+                   for node in list(self.cluster.nodes.values())]
 
         # The first node sends NEW_NODE for the other 2 nodes during startup, in case they are
         # late due to network delays let's block a bit longer
-        debug("Waiting for unwanted notifications...")
+        logger.debug("Waiting for unwanted notifications...")
         waiters[0].wait_for_notifications(timeout=30, num_notifications=2)
         waiters[0].clear_notifications()
 
-        debug("Issuing move command....")
-        node1 = self.cluster.nodes.values()[0]
+        logger.debug("Issuing move command....")
+        node1 = list(self.cluster.nodes.values())[0]
         node1.move("123")
 
         for waiter in waiters:
-            debug("Waiting for notification from {}".format(waiter.address,))
+            logger.debug("Waiting for notification from {}".format(waiter.address,))
             notifications = waiter.wait_for_notifications(30.0)
-            self.assertEquals(1 if waiter.node is node1 else 0, len(notifications), notifications)
+            assert (1 if waiter.node is node1 else 0) == len(notifications), notifications
 
-    def restart_node_test(self):
+    def test_restart_node(self):
         """
         @jira_ticket CASSANDRA-7816
         Restarting a node should generate exactly one DOWN and one UP notification
         """
-
         self.cluster.populate(2).start(wait_for_binary_proto=True, wait_other_notice=True)
         node1, node2 = self.cluster.nodelist()
 
@@ -162,7 +165,7 @@ class TestPushedNotifications(Tester):
 
         # need to block for up to 2 notifications (NEW_NODE and UP) so that these notifications
         # don't confuse the state below.
-        debug("Waiting for unwanted notifications...")
+        logger.debug("Waiting for unwanted notifications...")
         waiter.wait_for_notifications(timeout=30, num_notifications=2)
         waiter.clear_notifications()
 
@@ -171,25 +174,25 @@ class TestPushedNotifications(Tester):
         version = self.cluster.cassandra_version()
         expected_notifications = 2 if version >= '2.2' else 3
         for i in range(5):
-            debug("Restarting second node...")
+            logger.debug("Restarting second node...")
             node2.stop(wait_other_notice=True)
             node2.start(wait_other_notice=True)
-            debug("Waiting for notifications from {}".format(waiter.address))
+            logger.debug("Waiting for notifications from {}".format(waiter.address))
             notifications = waiter.wait_for_notifications(timeout=60.0, num_notifications=expected_notifications)
-            self.assertEquals(expected_notifications, len(notifications), notifications)
+            assert expected_notifications == len(notifications), notifications
             for notification in notifications:
-                self.assertEquals(get_ip_from_node(node2), notification["address"][0])
-            self.assertEquals("DOWN", notifications[0]["change_type"])
+                assert get_ip_from_node(node2) == notification["address"][0]
+            assert "DOWN" == notifications[0]["change_type"]
             if version >= '2.2':
-                self.assertEquals("UP", notifications[1]["change_type"])
+                assert "UP" == notifications[1]["change_type"]
             else:
                 # pre 2.2, we'll receive both a NEW_NODE and an UP notification,
                 # but the order is not guaranteed
-                self.assertEquals({"NEW_NODE", "UP"}, set(map(lambda n: n["change_type"], notifications[1:])))
+                assert {"NEW_NODE", "UP"} == set([n["change_type"] for n in notifications[1:]])
 
             waiter.clear_notifications()
 
-    def restart_node_localhost_test(self):
+    def test_restart_node_localhost(self):
         """
         Test that we don't get client notifications when rpc_address is set to localhost.
         @jira_ticket  CASSANDRA-10052
@@ -209,17 +212,17 @@ class TestPushedNotifications(Tester):
         waiter = NotificationWaiter(self, node1, ["STATUS_CHANGE", "TOPOLOGY_CHANGE"])
 
         # restart node 2
-        debug("Restarting second node...")
+        logger.debug("Restarting second node...")
         node2.stop(wait_other_notice=True)
         node2.start(wait_other_notice=True)
 
         # check that node1 did not send UP or DOWN notification for node2
-        debug("Waiting for notifications from {}".format(waiter.address,))
+        logger.debug("Waiting for notifications from {}".format(waiter.address,))
         notifications = waiter.wait_for_notifications(timeout=30.0, num_notifications=2)
-        self.assertEquals(0, len(notifications), notifications)
+        assert 0 == len(notifications), notifications
 
     @since("2.2")
-    def add_and_remove_node_test(self):
+    def test_add_and_remove_node(self):
         """
         Test that NEW_NODE and REMOVED_NODE are sent correctly as nodes join and leave.
         @jira_ticket CASSANDRA-11038
@@ -231,7 +234,7 @@ class TestPushedNotifications(Tester):
 
         # need to block for up to 2 notifications (NEW_NODE and UP) so that these notifications
         # don't confuse the state below
-        debug("Waiting for unwanted notifications...")
+        logger.debug("Waiting for unwanted notifications...")
         waiter.wait_for_notifications(timeout=30, num_notifications=2)
         waiter.clear_notifications()
 
@@ -240,29 +243,29 @@ class TestPushedNotifications(Tester):
         session.execute("ALTER KEYSPACE system_distributed WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'};")
         session.execute("ALTER KEYSPACE system_traces WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'};")
 
-        debug("Adding second node...")
+        logger.debug("Adding second node...")
         node2 = Node('node2', self.cluster, True, None, ('127.0.0.2', 7000), '7200', '0', None, binary_interface=('127.0.0.2', 9042))
         self.cluster.add(node2, False)
         node2.start(wait_other_notice=True)
-        debug("Waiting for notifications from {}".format(waiter.address))
+        logger.debug("Waiting for notifications from {}".format(waiter.address))
         notifications = waiter.wait_for_notifications(timeout=60.0, num_notifications=2)
-        self.assertEquals(2, len(notifications), notifications)
+        assert 2 == len(notifications), notifications
         for notification in notifications:
-            self.assertEquals(get_ip_from_node(node2), notification["address"][0])
-            self.assertEquals("NEW_NODE", notifications[0]["change_type"])
-            self.assertEquals("UP", notifications[1]["change_type"])
+            assert get_ip_from_node(node2) == notification["address"][0]
+            assert "NEW_NODE" == notifications[0]["change_type"]
+            assert "UP" == notifications[1]["change_type"]
 
-        debug("Removing second node...")
+        logger.debug("Removing second node...")
         waiter.clear_notifications()
         node2.decommission()
         node2.stop(gently=False)
-        debug("Waiting for notifications from {}".format(waiter.address))
+        logger.debug("Waiting for notifications from {}".format(waiter.address))
         notifications = waiter.wait_for_notifications(timeout=60.0, num_notifications=2)
-        self.assertEquals(2, len(notifications), notifications)
+        assert 2 == len(notifications), notifications
         for notification in notifications:
-            self.assertEquals(get_ip_from_node(node2), notification["address"][0])
-            self.assertEquals("REMOVED_NODE", notifications[0]["change_type"])
-            self.assertEquals("DOWN", notifications[1]["change_type"])
+            assert get_ip_from_node(node2) == notification["address"][0]
+            assert "REMOVED_NODE" == notifications[0]["change_type"]
+            assert "DOWN" == notifications[1]["change_type"]
 
     def change_rpc_address_to_localhost(self):
         """
@@ -272,23 +275,22 @@ class TestPushedNotifications(Tester):
 
         i = 0
         for node in cluster.nodelist():
-            debug('Set 127.0.0.1 to prevent IPv6 java prefs, set rpc_address: localhost in cassandra.yaml')
+            logger.debug('Set 127.0.0.1 to prevent IPv6 java prefs, set rpc_address: localhost in cassandra.yaml')
             if cluster.version() < '4':
                 node.network_interfaces['thrift'] = ('127.0.0.1', node.network_interfaces['thrift'][1] + i)
             node.network_interfaces['binary'] = ('127.0.0.1', node.network_interfaces['binary'][1] + i)
             node.import_config_files()  # this regenerates the yaml file and sets 'rpc_address' to the 'thrift' address
             node.set_configuration_options(values={'rpc_address': 'localhost'})
-            debug(node.show())
+            logger.debug(node.show())
             i += 2
 
     @since("3.0")
-    def schema_changes_test(self):
+    def test_schema_changes(self):
         """
         @jira_ticket CASSANDRA-10328
         Creating, updating and dropping a keyspace, a table and a materialized view
         will generate the correct schema change notifications.
         """
-
         self.cluster.populate(2).start(wait_for_binary_proto=True)
         node1, node2 = self.cluster.nodelist()
 
@@ -306,17 +308,23 @@
         session.execute("drop TABLE t")
         session.execute("drop KEYSPACE ks")
 
-        debug("Waiting for notifications from {}".format(waiter.address,))
+        logger.debug("Waiting for notifications from {}".format(waiter.address,))
         notifications = waiter.wait_for_notifications(timeout=60.0, num_notifications=8)
-        self.assertEquals(8, len(notifications), notifications)
-        self.assertDictContainsSubset({'change_type': u'CREATED', 'target_type': u'KEYSPACE'}, notifications[0])
-        self.assertDictContainsSubset({'change_type': u'CREATED', 'target_type': u'TABLE', u'table': u't'}, notifications[1])
-        self.assertDictContainsSubset({'change_type': u'UPDATED', 'target_type': u'TABLE', u'table': u't'}, notifications[2])
-        self.assertDictContainsSubset({'change_type': u'CREATED', 'target_type': u'TABLE', u'table': u'mv'}, notifications[3])
-        self.assertDictContainsSubset({'change_type': u'UPDATED', 'target_type': u'TABLE', u'table': u'mv'}, notifications[4])
-        self.assertDictContainsSubset({'change_type': u'DROPPED', 'target_type': u'TABLE', u'table': u'mv'}, notifications[5])
-        self.assertDictContainsSubset({'change_type': u'DROPPED', 'target_type': u'TABLE', u'table': u't'}, notifications[6])
-        self.assertDictContainsSubset({'change_type': u'DROPPED', 'target_type': u'KEYSPACE'}, notifications[7])
+        assert 8 == len(notifications), notifications
+        # each notification must contain the expected subset of keys/values,
+        # standing in for unittest's assertDictContainsSubset
+        expected_subsets = [
+            {'change_type': 'CREATED', 'target_type': 'KEYSPACE'},
+            {'change_type': 'CREATED', 'target_type': 'TABLE', 'table': 't'},
+            {'change_type': 'UPDATED', 'target_type': 'TABLE', 'table': 't'},
+            {'change_type': 'CREATED', 'target_type': 'TABLE', 'table': 'mv'},
+            {'change_type': 'UPDATED', 'target_type': 'TABLE', 'table': 'mv'},
+            {'change_type': 'DROPPED', 'target_type': 'TABLE', 'table': 'mv'},
+            {'change_type': 'DROPPED', 'target_type': 'TABLE', 'table': 't'},
+            {'change_type': 'DROPPED', 'target_type': 'KEYSPACE'},
+        ]
+        for notification, expected in zip(notifications, expected_subsets):
+            assert {k: notification[k] for k in expected if k in notification} == expected, notification
 
 
 class TestVariousNotifications(Tester):
@@ -325,17 +344,16 @@ class TestVariousNotifications(Tester):
     """
 
     @since('2.2')
-    def tombstone_failure_threshold_message_test(self):
+    def test_tombstone_failure_threshold_message(self):
         """
         Ensure nodes return an error message in case of TombstoneOverwhelmingExceptions rather
         than dropping the request. A drop makes the coordinator waits for the specified
         read_request_timeout_in_ms.
         @jira_ticket CASSANDRA-7886
         """
-
         have_v5_protocol = self.cluster.version() >= LooseVersion('3.10')
 
-        self.allow_log_errors = True
+        self.fixture_dtest_setup.allow_log_errors = True
         self.cluster.set_configuration_options(
             values={
                 'tombstone_failure_threshold': 500,
@@ -356,7 +374,7 @@ class TestVariousNotifications(Tester):
         )
 
         # Add data with tombstones
-        values = map(lambda i: str(i), range(1000))
+        values = [str(i) for i in range(1000)]
         for value in values:
             session.execute(SimpleStatement(
                 "insert into test (id, mytext, col1) values (1, '{}', null) ".format(
@@ -367,15 +385,15 @@ class TestVariousNotifications(Tester):
 
         failure_msg = ("Scanned over.* tombstones.* query aborted")
 
-        @timed(25)
+        @pytest.mark.timeout(25)
         def read_failure_query():
             try:
                 session.execute(SimpleStatement("select * from test where id in (1,2,3,4,5)", consistency_level=CL.ALL))
             except ReadFailure as exc:
                 if have_v5_protocol:
                     # at least one replica should have responded with a tombstone error
-                    self.assertIsNotNone(exc.error_code_map)
-                    self.assertEqual(0x0001, exc.error_code_map.values()[0])
+                    assert exc.error_code_map is not None
+                    assert 0x0001 == list(exc.error_code_map.values())[0]
             except Exception:
                 raise
             else:
@@ -393,22 +411,21 @@ class TestVariousNotifications(Tester):
                        node2.grep_log(failure_msg) or
                        node3.grep_log(failure_msg))
 
-            self.assertTrue(failure, ("Cannot find tombstone failure threshold error in log "
-                                      "after failed query"))
+            assert failure, "Cannot find tombstone failure threshold error in log after failed query"
 
         mark1 = node1.mark_log()
         mark2 = node2.mark_log()
         mark3 = node3.mark_log()
 
-        @timed(35)
+        @pytest.mark.timeout(35)
         def range_request_failure_query():
             try:
                 session.execute(SimpleStatement("select * from test", consistency_level=CL.ALL))
             except ReadFailure as exc:
                 if have_v5_protocol:
                     # at least one replica should have responded with a tombstone error
-                    self.assertIsNotNone(exc.error_code_map)
-                    self.assertEqual(0x0001, exc.error_code_map.values()[0])
+                    assert exc.error_code_map is not None
+                    assert 0x0001 == list(exc.error_code_map.values())[0]
             except Exception:
                 raise
             else:
@@ -426,5 +443,4 @@ class TestVariousNotifications(Tester):
                        node2.grep_log(failure_msg, from_mark=mark2) or
                        node3.grep_log(failure_msg, from_mark=mark3))
 
-            self.assertTrue(failure, ("Cannot find tombstone failure threshold error in log "
-                                      "after range_request_timeout_query"))
+            assert failure, "Cannot find tombstone failure threshold error in log after range_request_timeout_query"
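
One caveat on the @timed conversion above: @pytest.mark.timeout (provided by
the pytest-timeout plugin) only takes effect on collected test functions, so
placing it on the nested helper functions does not enforce a limit the way
nose's @timed did. A sketch of where the mark would need to sit instead:

    import pytest

    @pytest.mark.timeout(25)  # enforced by pytest-timeout when installed
    def test_read_failure_query_sketch():
        pass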

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/putget_test.py
----------------------------------------------------------------------
diff --git a/putget_test.py b/putget_test.py
index f200864..1933f8e 100644
--- a/putget_test.py
+++ b/putget_test.py
@@ -1,28 +1,39 @@
+import pytest
 import time
+import logging
 
 from cassandra import ConsistencyLevel
 from thrift.protocol import TBinaryProtocol
 from thrift.transport import TSocket, TTransport
 
+from dtest_setup_overrides import DTestSetupOverrides
+
 from dtest import Tester, create_ks, create_cf
 from tools.data import (create_c1c2_table, insert_c1c2, insert_columns, putget,
                         query_c1c2, query_columns, range_putget)
-from tools.decorators import no_vnodes, since
 from tools.misc import ImmutableMapping, retry_till_success
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
 
 class TestPutGet(Tester):
-    cluster_options = ImmutableMapping({'start_rpc': 'true'})
 
-    def putget_test(self):
+    @pytest.fixture(scope='function', autouse=True)
+    def fixture_dtest_setup_overrides(self):
+        dtest_setup_overrides = DTestSetupOverrides()
+        dtest_setup_overrides.cluster_options = ImmutableMapping({'start_rpc': 'true'})
+        return dtest_setup_overrides
+
+    def test_putget(self):
         """ Simple put/get on a single row, hitting multiple sstables """
         self._putget()
 
-    def putget_snappy_test(self):
+    def test_putget_snappy(self):
         """ Simple put/get on a single row, but hitting multiple sstables (with snappy compression) """
         self._putget(compression="Snappy")
 
-    def putget_deflate_test(self):
+    def test_putget_deflate(self):
         """ Simple put/get on a single row, but hitting multiple sstables (with deflate compression) """
         self._putget(compression="Deflate")
 
@@ -40,7 +51,7 @@ class TestPutGet(Tester):
 
         putget(cluster, session)
 
-    def non_local_read_test(self):
+    def test_non_local_read(self):
         """ This test reads from a coordinator we know has no copy of the data """
         cluster = self.cluster
 
@@ -53,12 +64,11 @@ class TestPutGet(Tester):
 
         # insert and get at CL.QUORUM (since RF=2, node1 won't have all key locally)
         insert_c1c2(session, n=1000, consistency=ConsistencyLevel.QUORUM)
-        for n in xrange(0, 1000):
+        for n in range(0, 1000):
             query_c1c2(session, n, ConsistencyLevel.QUORUM)
 
-    def rangeputget_test(self):
+    def test_rangeputget(self):
         """ Simple put/get on ranges of rows, hitting multiple sstables """
-
         cluster = self.cluster
 
         cluster.populate(3).start()
@@ -70,7 +80,7 @@ class TestPutGet(Tester):
 
         range_putget(cluster, session)
 
-    def wide_row_test(self):
+    def test_wide_row(self):
         """ Test wide row slices """
         cluster = self.cluster
 
@@ -83,16 +93,16 @@ class TestPutGet(Tester):
 
         key = 'wide'
 
-        for x in xrange(1, 5001):
+        for x in range(1, 5001):
             insert_columns(self, session, key, 100, offset=x - 1)
 
         for size in (10, 100, 1000):
-            for x in xrange(1, (50001 - size) / size):
+            for x in range(1, (50001 - size) // size):
                 query_columns(self, session, key, size, offset=x * size - 1)
 
-    @no_vnodes()
+    @pytest.mark.no_vnodes
     @since('2.0', max_version='4')
-    def wide_slice_test(self):
+    def test_wide_slice(self):
         """
         Check slicing a wide row.
         See https://issues.apache.org/jira/browse/CASSANDRA-4919
@@ -140,9 +150,9 @@ class TestPutGet(Tester):
         session.execute(query)
         time.sleep(.5)
 
-        for i in xrange(10):
+        for i in range(10):
             key_num = str(i).zfill(2)
-            for j in xrange(10):
+            for j in range(10):
                 stmt = "INSERT INTO test (k, column1, value) VALUES ('a%s', 'col%s', '%s')" % (key_num, j, j)
                 session.execute(stmt)
                 stmt = "INSERT INTO test (k, column1, value) VALUES ('b%s', 'col%s', '%s')" % (key_num, j, j)
@@ -172,7 +182,7 @@ class TestPutGet(Tester):
             # print row.key
             # print cols
 
-        self.assertEqual(len(columns), 95, "Regression in cassandra-4919. Expected 95 columns, got {}.".format(len(columns)))
+        assert len(columns) == 95, "Regression in cassandra-4919. Expected 95 columns, got {}.".format(len(columns))
 
 
 class ThriftConnection(object):
@@ -248,7 +258,7 @@ class ThriftConnection(object):
 
     def wait_for_agreement(self):
         schemas = self.client.describe_schema_versions()
-        if len([ss for ss in schemas.keys() if ss != 'UNREACHABLE']) > 1:
+        if len([ss for ss in list(schemas.keys()) if ss != 'UNREACHABLE']) > 1:
             raise Exception("schema agreement not reached")
 
     def _translate_cl(self, cl):
@@ -258,7 +268,7 @@ class ThriftConnection(object):
         """ Insert some basic values """
         cf_parent = self.Cassandra.ColumnParent(column_family=self.cf_name)
 
-        for row_key in ('row_%d' % i for i in xrange(num_rows)):
+        for row_key in ('row_%d' % i for i in range(num_rows)):
             col = self.Cassandra.Column(name='col_0', value='val_0',
                                         timestamp=int(time.time() * 1000))
             retry_till_success(self.client.insert,
@@ -269,7 +279,7 @@ class ThriftConnection(object):
 
     def query_columns(self, num_rows=10, consistency_level='QUORUM'):
         """ Check that the values inserted in insert_columns() are present """
-        for row_key in ('row_%d' % i for i in xrange(num_rows)):
+        for row_key in ('row_%d' % i for i in range(num_rows)):
             cpath = self.Cassandra.ColumnPath(column_family=self.cf_name,
                                               column='col_0')
             cosc = retry_till_success(self.client.get, key=row_key, column_path=cpath,
@@ -277,5 +287,5 @@ class ThriftConnection(object):
                                       timeout=30)
             col = cosc.column
             value = col.value
-            self.assertEqual(value, 'val_0')
+            assert value == 'val_0'
         return self
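
The same few python3 conversions recur throughout this patch; a compact
reference drawn from the hunks above:

    # python 2                          python 3 form used in this patch
    # xrange(1000)                  ->  range(1000)
    # (50001 - size) / size         ->  (50001 - size) // size   (/ now returns a float)
    # cluster.nodes.values()[0]     ->  list(cluster.nodes.values())[0]  (views aren't indexable)
    # print "text"                  ->  print("text")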

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/pytest.ini
----------------------------------------------------------------------
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 0000000..1ee5342
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,5 @@
+[pytest]
+junit_suite_name = Cassandra dtests
+log_print = True
+log_level = INFO
+log_format = %(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s
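
These ini settings drive the module-level logger pattern used across the
migrated tests; a sketch (assuming pytest 3.3+, where the log_* options are
built in):

    import logging

    logger = logging.getLogger(__name__)

    def test_logging_sketch():
        # captured at INFO per log_level above; logger.debug() calls need a
        # lower threshold, e.g. --log-level=DEBUG on newer pytest versions
        logger.info('shown in captured logs for failing tests')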

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/range_ghost_test.py
----------------------------------------------------------------------
diff --git a/range_ghost_test.py b/range_ghost_test.py
index 34f599c..6d3d3ae 100644
--- a/range_ghost_test.py
+++ b/range_ghost_test.py
@@ -1,12 +1,15 @@
 import time
+import logging
 
 from tools.assertions import assert_length_equal
 from dtest import Tester, create_ks, create_cf
 
+logger = logging.getLogger(__name__)
+
 
 class TestRangeGhosts(Tester):
 
-    def ghosts_test(self):
+    def test_ghosts(self):
         """ Check range ghost are correctly removed by the system """
         cluster = self.cluster
         cluster.populate(1).start()
@@ -19,7 +22,7 @@ class TestRangeGhosts(Tester):
 
         rows = 1000
 
-        for i in xrange(0, rows):
+        for i in range(0, rows):
             session.execute("UPDATE cf SET c = 'value' WHERE key = 'k%i'" % i)
 
         res = list(session.execute("SELECT * FROM cf LIMIT 10000"))
@@ -27,7 +30,7 @@ class TestRangeGhosts(Tester):
 
         node1.flush()
 
-        for i in xrange(0, rows / 2):
+        for i in range(0, rows // 2):
             session.execute("DELETE FROM cf WHERE key = 'k%i'" % i)
 
         res = list(session.execute("SELECT * FROM cf LIMIT 10000"))

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/read_failures_test.py
----------------------------------------------------------------------
diff --git a/read_failures_test.py b/read_failures_test.py
index ba43505..80ebe5d 100644
--- a/read_failures_test.py
+++ b/read_failures_test.py
@@ -1,9 +1,14 @@
+import logging
+import pytest
+
 from cassandra import ConsistencyLevel, ReadFailure, ReadTimeout
 from cassandra.policies import FallthroughRetryPolicy
 from cassandra.query import SimpleStatement
 
 from dtest import Tester
-from tools.decorators import since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 KEYSPACE = "readfailures"
 
@@ -13,19 +18,21 @@ class TestReadFailures(Tester):
     Tests for read failures in the replicas, introduced as a part of
     @jira_ticket CASSANDRA-12311.
     """
-    ignore_log_patterns = (
-        "Scanned over [1-9][0-9]* tombstones",  # This is expected when testing read failures due to tombstones
-    )
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        fixture_dtest_setup.ignore_log_patterns = (
+            "Scanned over [1-9][0-9]* tombstones",  # This is expected when testing read failures due to tombstones
+        )
+        return fixture_dtest_setup
 
-    def setUp(self):
-        super(TestReadFailures, self).setUp()
+    @pytest.fixture(scope='function', autouse=True)
+    def parse_dtest_config(self, parse_dtest_config):
         self.tombstone_failure_threshold = 500
         self.replication_factor = 3
         self.consistency_level = ConsistencyLevel.ALL
         self.expected_expt = ReadFailure
 
-    def tearDown(self):
-        super(TestReadFailures, self).tearDown()
+        return parse_dtest_config
 
     def _prepare_cluster(self):
         self.cluster.set_configuration_options(
@@ -33,7 +40,7 @@ class TestReadFailures(Tester):
         )
         self.cluster.populate(3)
         self.cluster.start(wait_for_binary_proto=True)
-        self.nodes = self.cluster.nodes.values()
+        self.nodes = list(self.cluster.nodes.values())
 
         session = self.patient_exclusive_cql_connection(self.nodes[0], protocol_version=self.protocol_version)
 
@@ -59,11 +66,11 @@ class TestReadFailures(Tester):
         if self.expected_expt is None:
             session.execute(statement)
         else:
-            with self.assertRaises(self.expected_expt) as cm:
+            with pytest.raises(self.expected_expt) as cm:
                 # On 2.1, we won't return the ReadTimeout from coordinator until actual timeout,
                 # so we need to up the default timeout of the driver session
                 session.execute(statement, timeout=15)
-            return cm.exception
+            return cm.value
 
     def _assert_error_code_map_exists_with_code(self, exception, expected_code):
         """
@@ -71,14 +78,14 @@ class TestReadFailures(Tester):
         where at least one node responded with some expected code.
         This is meant for testing failure exceptions on protocol v5.
         """
-        self.assertIsNotNone(exception)
-        self.assertIsNotNone(exception.error_code_map)
+        assert exception is not None
+        assert exception.error_code_map is not None
         expected_code_found = False
-        for error_code in exception.error_code_map.values():
+        for error_code in list(exception.error_code_map.values()):
             if error_code == expected_code:
                 expected_code_found = True
                 break
-        self.assertTrue(expected_code_found, "The error code map did not contain " + str(expected_code))
+        assert expected_code_found, "The error code map did not contain " + str(expected_code)
 
     @since('2.1')
     def test_tombstone_failure_v3(self):


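A note on the pattern above: pytest.raises yields an ExceptionInfo object,
and the raised exception instance is available through its public .value
attribute (along with .type and .traceback). A minimal sketch of the
pattern, independent of the dtest harness:

    import pytest

    def divide(a, b):
        return a / b

    def test_divide_by_zero():
        with pytest.raises(ZeroDivisionError) as excinfo:
            divide(1, 0)
        # excinfo.value is the exception instance raised inside the block
        assert "division" in str(excinfo.value)
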
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/jmx_test.py
----------------------------------------------------------------------
diff --git a/jmx_test.py b/jmx_test.py
index 16c1ece..04c05d8 100644
--- a/jmx_test.py
+++ b/jmx_test.py
@@ -1,24 +1,36 @@
 import os
 import time
+import pytest
+import parse
+import re
+import logging
 
 import ccmlib.common
-import parse
 from ccmlib.node import ToolError
 
-from dtest import Tester, debug
-from tools.decorators import since
+from dtest import Tester
 from tools.jmxutils import (JolokiaAgent, enable_jmx_ssl, make_mbean,
                             remove_perf_disable_shared_mem)
 from tools.misc import generate_ssl_stores
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
 
 class TestJMX(Tester):
-    def netstats_test(self):
+
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        fixture_dtest_setup.ignore_log_patterns = (
+            r'Failed to properly handshake with peer.* Closing the channel',
+        )
+
+    def test_netstats(self):
         """
         Check functioning of nodetool netstats, especially with restarts.
         @jira_ticket CASSANDRA-8122, CASSANDRA-6577
         """
-
+        #
         cluster = self.cluster
         cluster.populate(3).start(wait_for_binary_proto=True)
         node1, node2, node3 = cluster.nodelist()
@@ -27,7 +39,7 @@ class TestJMX(Tester):
         node1.flush()
         node1.stop(gently=False)
 
-        with self.assertRaisesRegexp(ToolError, "ConnectException: 'Connection refused( \(Connection refused\))?'."):
+        with pytest.raises(ToolError, match="ConnectException: 'Connection refused( \(Connection refused\))?'."):
             node1.nodetool('netstats')
 
         # don't wait; we're testing for when nodetool is called on a node mid-startup
@@ -42,17 +54,16 @@ class TestJMX(Tester):
             try:
                 node1.nodetool('netstats')
             except Exception as e:
-                self.assertNotIn('java.lang.reflect.UndeclaredThrowableException', str(e),
-                                 'Netstats failed with UndeclaredThrowableException (CASSANDRA-8122)')
+                assert 'java.lang.reflect.UndeclaredThrowableException' not in str(e), \
+                    'Netstats failed with UndeclaredThrowableException (CASSANDRA-8122)'
                 if not isinstance(e, ToolError):
                     raise
                 else:
-                    self.assertRegexpMatches(str(e),
-                                             "ConnectException: 'Connection refused( \(Connection refused\))?'.")
+                    assert re.search("ConnectException: 'Connection refused( \(Connection refused\))?'.", repr(e))
 
-        self.assertTrue(running, msg='node1 never started')
+        assert running, 'node1 never started'
 
-    def table_metric_mbeans_test(self):
+    def test_table_metric_mbeans(self):
         """
         Test some basic table metric mbeans with simple writes.
         """
@@ -66,7 +77,7 @@ class TestJMX(Tester):
         node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=3)'])
 
         typeName = "ColumnFamily" if version <= '2.2.X' else 'Table'
-        debug('Version {} typeName {}'.format(version, typeName))
+        logger.debug('Version {} typeName {}'.format(version, typeName))
 
         # TODO the keyspace and table name are capitalized in 2.0
         memtable_size = make_mbean('metrics', type=typeName, keyspace='keyspace1', scope='standard1',
@@ -78,21 +89,21 @@ class TestJMX(Tester):
 
         with JolokiaAgent(node1) as jmx:
             mem_size = jmx.read_attribute(memtable_size, "Value")
-            self.assertGreater(int(mem_size), 10000)
+            assert int(mem_size) > 10000
 
             on_disk_size = jmx.read_attribute(disk_size, "Count")
-            self.assertEquals(int(on_disk_size), 0)
+            assert int(on_disk_size) == 0
 
             node1.flush()
 
             on_disk_size = jmx.read_attribute(disk_size, "Count")
-            self.assertGreater(int(on_disk_size), 10000)
+            assert int(on_disk_size) > 10000
 
             sstables = jmx.read_attribute(sstable_count, "Value")
-            self.assertGreaterEqual(int(sstables), 1)
+            assert int(sstables) >= 1
 
     @since('3.0')
-    def mv_metric_mbeans_release_test(self):
+    def test_mv_metric_mbeans_release(self):
         """
         Test that the right mbeans are created and released when creating mvs
         """
@@ -133,33 +144,33 @@ class TestJMX(Tester):
                                  "but wasn't!"
 
         with JolokiaAgent(node) as jmx:
-            self.assertIsNotNone(jmx.read_attribute(table_memtable_size, "Value"),
-                                 missing_metric_message.format("AllMemtablesHeapSize", "testtable"))
-            self.assertIsNotNone(jmx.read_attribute(table_view_read_time, "Count"),
-                                 missing_metric_message.format("ViewReadTime", "testtable"))
-            self.assertIsNotNone(jmx.read_attribute(table_view_lock_time, "Count"),
-                                 missing_metric_message.format("ViewLockAcquireTime", "testtable"))
-            self.assertIsNotNone(jmx.read_attribute(mv_memtable_size, "Value"),
-                                 missing_metric_message.format("AllMemtablesHeapSize", "testmv"))
-            self.assertRaisesRegexp(Exception, ".*InstanceNotFoundException.*", jmx.read_attribute,
-                                    mbean=mv_view_read_time, attribute="Count", verbose=False)
-            self.assertRaisesRegexp(Exception, ".*InstanceNotFoundException.*", jmx.read_attribute,
-                                    mbean=mv_view_lock_time, attribute="Count", verbose=False)
+            assert jmx.read_attribute(table_memtable_size, "Value") is not None, \
+                missing_metric_message.format("AllMemtablesHeapSize", "testtable")
+            assert jmx.read_attribute(table_view_read_time, "Count") is not None, \
+                missing_metric_message.format("ViewReadTime", "testtable")
+            assert jmx.read_attribute(table_view_lock_time, "Count") is not None, \
+                missing_metric_message.format("ViewLockAcquireTime", "testtable")
+            assert jmx.read_attribute(mv_memtable_size, "Value") is not None, \
+                missing_metric_message.format("AllMemtablesHeapSize", "testmv")
+            with pytest.raises(Exception, match=".*InstanceNotFoundException.*"):
+                jmx.read_attribute(mbean=mv_view_read_time, attribute="Count", verbose=False)
+            with pytest.raises(Exception, match=".*InstanceNotFoundException.*"):
+                jmx.read_attribute(mbean=mv_view_lock_time, attribute="Count", verbose=False)
 
         node.run_cqlsh(cmds="DROP KEYSPACE mvtest;")
         with JolokiaAgent(node) as jmx:
-            self.assertRaisesRegexp(Exception, ".*InstanceNotFoundException.*", jmx.read_attribute,
-                                    mbean=table_memtable_size, attribute="Value", verbose=False)
-            self.assertRaisesRegexp(Exception, ".*InstanceNotFoundException.*", jmx.read_attribute,
-                                    mbean=table_view_lock_time, attribute="Count", verbose=False)
-            self.assertRaisesRegexp(Exception, ".*InstanceNotFoundException.*", jmx.read_attribute,
-                                    mbean=table_view_read_time, attribute="Count", verbose=False)
-            self.assertRaisesRegexp(Exception, ".*InstanceNotFoundException.*", jmx.read_attribute,
-                                    mbean=mv_memtable_size, attribute="Value", verbose=False)
-            self.assertRaisesRegexp(Exception, ".*InstanceNotFoundException.*", jmx.read_attribute,
-                                    mbean=mv_view_lock_time, attribute="Count", verbose=False)
-            self.assertRaisesRegexp(Exception, ".*InstanceNotFoundException.*", jmx.read_attribute,
-                                    mbean=mv_view_read_time, attribute="Count", verbose=False)
+            with pytest.raises(Exception, match=".*InstanceNotFoundException.*"):
+                jmx.read_attribute(mbean=table_memtable_size, attribute="Value", verbose=False)
+            with pytest.raises(Exception, match=".*InstanceNotFoundException.*"):
+                jmx.read_attribute(mbean=table_view_lock_time, attribute="Count", verbose=False)
+            with pytest.raises(Exception, match=".*InstanceNotFoundException.*"):
+                jmx.read_attribute(mbean=table_view_read_time, attribute="Count", verbose=False)
+            with pytest.raises(Exception, match=".*InstanceNotFoundException.*"):
+                jmx.read_attribute(mbean=mv_memtable_size, attribute="Value", verbose=False)
+            with pytest.raises(Exception, match=".*InstanceNotFoundException.*"):
+                jmx.read_attribute(mbean=mv_view_lock_time, attribute="Count", verbose=False)
+            with pytest.raises(Exception, match=".*InstanceNotFoundException.*"):
+                jmx.read_attribute(mbean=mv_view_read_time, attribute="Count", verbose=False)
 
     def test_compactionstats(self):
         """
@@ -204,14 +215,14 @@ class TestJMX(Tester):
             progress = int(parse.search(var, progress_string).named['progress'])
             updated_progress = int(parse.search(var, updated_progress_string).named['progress'])
 
-            debug(progress_string)
-            debug(updated_progress_string)
+            logger.debug(progress_string)
+            logger.debug(updated_progress_string)
 
             # We want to make sure that the progress is increasing,
             # and that values other than zero are displayed.
-            self.assertGreater(updated_progress, progress)
-            self.assertGreaterEqual(progress, 0)
-            self.assertGreater(updated_progress, 0)
+            assert updated_progress > progress
+            assert progress >= 0
+            assert updated_progress > 0
 
             # Block until the major compaction is complete
             # Otherwise nodetool will throw an exception
@@ -219,41 +230,42 @@ class TestJMX(Tester):
             # and never ends.
             start = time.time()
             max_query_timeout = 600
-            debug("Waiting for compaction to finish:")
+            logger.debug("Waiting for compaction to finish:")
             while (len(jmx.read_attribute(compaction_manager, 'CompactionSummary')) > 0) and (
                     time.time() - start < max_query_timeout):
-                debug(jmx.read_attribute(compaction_manager, 'CompactionSummary'))
+                logger.debug(jmx.read_attribute(compaction_manager, 'CompactionSummary'))
                 time.sleep(2)
 
     @since('2.2')
-    def phi_test(self):
+    def test_phi(self):
         """
         Check functioning of nodetool failuredetector.
         @jira_ticket CASSANDRA-9526
         """
-
         cluster = self.cluster
         cluster.populate(3).start(wait_for_binary_proto=True)
         node1, node2, node3 = cluster.nodelist()
 
-        phivalues = node1.nodetool("failuredetector").stdout.splitlines()
-        endpoint1Values = phivalues[1].split()
-        endpoint2Values = phivalues[2].split()
+        stdout = node1.nodetool("failuredetector").stdout
+        phivalues = stdout.splitlines()
+        endpoint1values = phivalues[1].split()
+        endpoint2values = phivalues[2].split()
 
-        endpoint1 = endpoint1Values[0][1:-1]
-        endpoint2 = endpoint2Values[0][1:-1]
+        endpoint1 = endpoint1values[0][1:-1]
+        endpoint2 = endpoint2values[0][1:-1]
 
-        self.assertItemsEqual([endpoint1, endpoint2], ['127.0.0.2', '127.0.0.3'])
+        assert '127.0.0.2' in [endpoint1, endpoint2]
+        assert '127.0.0.3' in [endpoint1, endpoint2]
 
-        endpoint1Phi = float(endpoint1Values[1])
-        endpoint2Phi = float(endpoint2Values[1])
+        endpoint1phi = float(endpoint1values[1])
+        endpoint2phi = float(endpoint2values[1])
 
         max_phi = 2.0
-        self.assertGreater(endpoint1Phi, 0.0)
-        self.assertLess(endpoint1Phi, max_phi)
+        assert endpoint1phi > 0.0
+        assert endpoint1phi < max_phi
 
-        self.assertGreater(endpoint2Phi, 0.0)
-        self.assertLess(endpoint2Phi, max_phi)
+        assert endpoint2phi > 0.0
+        assert endpoint2phi < max_phi
 
     @since('4.0')
     def test_set_get_batchlog_replay_throttle(self):
@@ -272,9 +284,9 @@ class TestJMX(Tester):
         with JolokiaAgent(node) as jmx:
             mbean = make_mbean('db', 'StorageService')
             jmx.write_attribute(mbean, 'BatchlogReplayThrottleInKB', 4096)
-            self.assertTrue(len(node.grep_log('Updating batchlog replay throttle to 4096 KB/s, 2048 KB/s per endpoint',
-                                              filename='debug.log')) > 0)
-            self.assertEqual(4096, jmx.read_attribute(mbean, 'BatchlogReplayThrottleInKB'))
+            assert len(node.grep_log('Updating batchlog replay throttle to 4096 KB/s, 2048 KB/s per endpoint',
+                                     filename='debug.log')) > 0
+            assert 4096 == jmx.read_attribute(mbean, 'BatchlogReplayThrottleInKB')
 
 
 @since('3.9')
@@ -283,12 +295,12 @@ class TestJMXSSL(Tester):
     truststore_password = 'cassandra'
 
     def truststore(self):
-        return os.path.join(self.test_path, 'truststore.jks')
+        return os.path.join(self.fixture_dtest_setup.test_path, 'truststore.jks')
 
     def keystore(self):
-        return os.path.join(self.test_path, 'keystore.jks')
+        return os.path.join(self.fixture_dtest_setup.test_path, 'keystore.jks')
 
-    def jmx_connection_test(self):
+    def test_jmx_connection(self):
         """
         Check connecting with a JMX client (via nodetool) where SSL is enabled for JMX
         @jira_ticket CASSANDRA-12109
@@ -302,7 +314,7 @@ class TestJMXSSL(Tester):
         node.nodetool("info --ssl -Djavax.net.ssl.trustStore={ts} -Djavax.net.ssl.trustStorePassword={ts_pwd}"
                       .format(ts=self.truststore(), ts_pwd=self.truststore_password))
 
-    def require_client_auth_test(self):
+    def test_require_client_auth(self):
         """
         Check connecting with a JMX client (via nodetool) where SSL is enabled and
         client certificate auth is also configured
@@ -315,7 +327,7 @@ class TestJMXSSL(Tester):
         self.assert_insecure_connection_rejected(node)
 
         # specifying only the truststore containing the server cert should fail
-        with self.assertRaisesRegexp(ToolError, ".*SSLHandshakeException.*"):
+        with pytest.raises(ToolError, match=".*SSLHandshakeException.*"):
             node.nodetool("info --ssl -Djavax.net.ssl.trustStore={ts} -Djavax.net.ssl.trustStorePassword={ts_pwd}"
                           .format(ts=self.truststore(), ts_pwd=self.truststore_password))
 
@@ -329,14 +341,14 @@ class TestJMXSSL(Tester):
         """
         Attempts to connect to JMX (via nodetool) without any client side ssl parameters, expecting failure
         """
-        with self.assertRaises(ToolError):
+        with pytest.raises(ToolError):
             node.nodetool("info")
 
     def _populateCluster(self, require_client_auth=False):
         cluster = self.cluster
         cluster.populate(1)
 
-        generate_ssl_stores(self.test_path)
+        generate_ssl_stores(self.fixture_dtest_setup.test_path)
         if require_client_auth:
             ts = self.truststore()
             ts_pwd = self.truststore_password

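On the assertRaisesRegexp conversions above: pytest.raises takes a match=
keyword whose pattern is applied with re.search against the string form of
the raised exception, mirroring unittest's search (not full-match)
semantics. A minimal sketch:

    import pytest

    def connect():
        raise ConnectionError("ConnectException: 'Connection refused'.")

    def test_connect_refused():
        # match= performs re.search(pattern, str(excinfo.value))
        with pytest.raises(ConnectionError, match=r"Connection refused"):
            connect()
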
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/json_test.py
----------------------------------------------------------------------
diff --git a/json_test.py b/json_test.py
index 81590cb..998751a 100644
--- a/json_test.py
+++ b/json_test.py
@@ -4,13 +4,18 @@ import os
 import re
 import subprocess
 import sys
+import pytest
+import logging
+
 from distutils.version import LooseVersion
 
 from ccmlib import common
 from ccmlib.common import is_win
 
 from dtest import Tester
-from tools.decorators import since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 def build_doc_context(tester, test_name, prepare=True, connection=None, nodes=None):
@@ -79,10 +84,10 @@ def build_doc_context(tester, test_name, prepare=True, connection=None, nodes=No
         args = [host, str(port)]
         sys.stdout.flush()
         p = subprocess.Popen([cli] + args, env=env, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
-        p.stdin.write("USE {};".format(enabled_ks()))
+        p.stdin.write("USE {};".format(enabled_ks()).encode('utf-8'))
         for cmd in cmds.split(';'):
-            p.stdin.write(cmd + ';\n')
-        p.stdin.write("quit;\n")  # may not be necesary, things could simplify a bit if removed
+            p.stdin.write((cmd + ';\n').encode('utf-8'))
+        p.stdin.write("quit;\n".encode('utf-8'))  # may not be necesary, things could simplify a bit if removed
         return p.communicate()
 
     def cqlsh(cmds, supress_err=False):
@@ -96,7 +101,7 @@ def build_doc_context(tester, test_name, prepare=True, connection=None, nodes=No
 
         # if output is empty string we want to just return None
         if output:
-            return output
+            return output.decode("utf-8")
 
     def cqlsh_print(cmds, supress_err=False):
         """
@@ -130,13 +135,13 @@ def build_doc_context(tester, test_name, prepare=True, connection=None, nodes=No
         if not err:
             raise RuntimeError("Expected cqlsh error but none occurred!")
 
-        return err
+        return err.decode("utf-8")
 
     def cqlsh_err_print(cmds):
         """
         Run cqlsh commands expecting error output, and print error output.
         """
-        print(cqlsh_err(cmds))
+        print(cqlsh_err(cmds))
 
     def cql(query):
         """
@@ -193,12 +198,12 @@ def run_func_docstring(tester, test_func, globs=None, verbose=False, compileflag
 
 
 @since('2.2')
-class ToJsonSelectTests(Tester):
+class TestToJsonSelect(Tester):
     """
     Tests using toJson with a SELECT statement
     """
 
-    def basic_data_types_test(self):
+    def test_basic_data_types(self):
         """
         Create our schema:
 
@@ -267,10 +272,10 @@ class ToJsonSelectTests(Tester):
             (1 rows)
             <BLANKLINE>
         """
-        run_func_docstring(tester=self, test_func=self.basic_data_types_test)
+        run_func_docstring(tester=self, test_func=self.test_basic_data_types)
 
     # yes, it's probably weird to use json for counter changes
-    def counters_test(self):
+    def test_counters(self):
         """
         Add a table with a few counters:
 
@@ -299,9 +304,9 @@ class ToJsonSelectTests(Tester):
             (1 rows)
             <BLANKLINE>
         """
-        run_func_docstring(tester=self, test_func=self.counters_test)
+        run_func_docstring(tester=self, test_func=self.test_counters)
 
-    def complex_data_types_test(self):
+    def test_complex_data_types(self):
         """
         Build some user types and a schema that uses them:
 
@@ -449,16 +454,16 @@ class ToJsonSelectTests(Tester):
             (1 rows)
             <BLANKLINE>
         """
-        run_func_docstring(tester=self, test_func=self.complex_data_types_test)
+        run_func_docstring(tester=self, test_func=self.test_complex_data_types)
 
 
 @since('2.2')
-class FromJsonUpdateTests(Tester):
+class TestFromJsonUpdate(Tester):
     """
     Tests using fromJson within UPDATE statements.
     """
 
-    def basic_data_types_test(self):
+    def test_basic_data_types(self):
         """
         Create a table with the primitive types:
 
@@ -522,9 +527,9 @@ class FromJsonUpdateTests(Tester):
             (1 rows)
             <BLANKLINE>
         """
-        run_func_docstring(tester=self, test_func=self.basic_data_types_test)
+        run_func_docstring(tester=self, test_func=self.test_basic_data_types)
 
-    def complex_data_types_test(self):
+    def test_complex_data_types(self):
         """"
         UDT and schema setup:
 
@@ -676,9 +681,9 @@ class FromJsonUpdateTests(Tester):
             (1 rows)
             <BLANKLINE>
         """
-        run_func_docstring(tester=self, test_func=self.complex_data_types_test)
+        run_func_docstring(tester=self, test_func=self.test_complex_data_types)
 
-    def collection_update_test(self):
+    def test_collection_update(self):
         """
         Setup schema, add a row:
 
@@ -750,16 +755,16 @@ class FromJsonUpdateTests(Tester):
             (1 rows)
             <BLANKLINE>
         """
-        run_func_docstring(tester=self, test_func=self.collection_update_test)
+        run_func_docstring(tester=self, test_func=self.test_collection_update)
 
 
 @since('2.2')
-class FromJsonSelectTests(Tester):
+class TestFromJsonSelect(Tester):
     """
     Tests using fromJson in conjunction with a SELECT statement
     """
 
-    def selecting_pkey_as_json_test(self):
+    def test_selecting_pkey_as_json(self):
         """
         Schema setup:
 
@@ -793,9 +798,9 @@ class FromJsonSelectTests(Tester):
             (1 rows)
             <BLANKLINE>
         """
-        run_func_docstring(tester=self, test_func=self.selecting_pkey_as_json_test)
+        run_func_docstring(tester=self, test_func=self.test_selecting_pkey_as_json)
 
-    def select_using_secondary_index_test(self):
+    def test_select_using_secondary_index(self):
         """
         Schema setup and secondary index:
 
@@ -832,16 +837,16 @@ class FromJsonSelectTests(Tester):
             (1 rows)
             <BLANKLINE>
         """
-        run_func_docstring(tester=self, test_func=self.select_using_secondary_index_test)
+        run_func_docstring(tester=self, test_func=self.test_select_using_secondary_index)
 
 
 @since('2.2')
-class FromJsonInsertTests(Tester):
+class TestFromJsonInsert(Tester):
     """
     Tests using fromJson within INSERT statements.
     """
 
-    def basic_data_types_test(self):
+    def test_basic_data_types(self):
         """
         Create a table with the primitive types:
 
@@ -902,9 +907,9 @@ class FromJsonInsertTests(Tester):
             (1 rows)
             <BLANKLINE>
         """
-        run_func_docstring(tester=self, test_func=self.basic_data_types_test)
+        run_func_docstring(tester=self, test_func=self.test_basic_data_types)
 
-    def complex_data_types_test(self):
+    def test_complex_data_types(self):
         """
         Build some user types and a schema that uses them:
 
@@ -1055,16 +1060,16 @@ class FromJsonInsertTests(Tester):
             (2 rows)
             <BLANKLINE>
         """
-        run_func_docstring(tester=self, test_func=self.complex_data_types_test)
+        run_func_docstring(tester=self, test_func=self.test_complex_data_types)
 
 
 @since('2.2')
-class FromJsonDeleteTests(Tester):
+class TestFromJsonDelete(Tester):
     """
     Tests using fromJson within DELETE statements.
     """
 
-    def delete_using_pkey_json_test(self):
+    def test_delete_using_pkey_json(self):
         """
         Schema setup:
 
@@ -1119,16 +1124,16 @@ class FromJsonDeleteTests(Tester):
             <BLANKLINE>
             <BLANKLINE>
         """
-        run_func_docstring(tester=self, test_func=self.delete_using_pkey_json_test)
+        run_func_docstring(tester=self, test_func=self.test_delete_using_pkey_json)
 
 
 @since('2.2')
-class JsonFullRowInsertSelect(Tester):
+class TestJsonFullRowInsertSelect(Tester):
     """
     Tests for creating full rows from json documents, selecting full rows back as json documents, and related functionality.
     """
 
-    def simple_schema_test(self):
+    def test_simple_schema(self):
         """
         Create schema:
 
@@ -1231,9 +1236,9 @@ class JsonFullRowInsertSelect(Tester):
             (2 rows)
             <BLANKLINE>
         """
-        run_func_docstring(tester=self, test_func=self.simple_schema_test)
+        run_func_docstring(tester=self, test_func=self.test_simple_schema)
 
-    def pkey_requirement_test(self):
+    def test_pkey_requirement(self):
         """
         Create schema:
 
@@ -1263,9 +1268,9 @@ class JsonFullRowInsertSelect(Tester):
             <stdin>:2:InvalidRequest: Error from server: code=2200 [Invalid query] message="Invalid null value in condition for column key1"
             <BLANKLINE>
         """
-        run_func_docstring(tester=self, test_func=self.pkey_requirement_test)
+        run_func_docstring(tester=self, test_func=self.test_pkey_requirement)
 
-    def null_value_test(self):
+    def test_null_value(self):
         """
         Create schema:
 
@@ -1306,9 +1311,9 @@ class JsonFullRowInsertSelect(Tester):
             (1 rows)
             <BLANKLINE>
         """
-        run_func_docstring(tester=self, test_func=self.null_value_test)
+        run_func_docstring(tester=self, test_func=self.test_null_value)
 
-    def complex_schema_test(self):
+    def test_complex_schema(self):
         """
         Create some udt's and schema:
 
@@ -1507,4 +1512,4 @@ class JsonFullRowInsertSelect(Tester):
             <BLANKLINE>
 
         """
-        run_func_docstring(tester=self, test_func=self.complex_schema_test)
+        run_func_docstring(tester=self, test_func=self.test_complex_schema)

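The encode()/decode() calls introduced above follow from a Python 3 change:
subprocess pipes carry bytes unless text mode is requested, so anything
written to cqlsh's stdin must be encoded and its output decoded. A minimal
sketch of the same round trip (assuming the standard `cat` utility is on
PATH, purely for illustration):

    import subprocess

    p = subprocess.Popen(['cat'], stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE)
    out, _ = p.communicate(input="SELECT 1;\n".encode('utf-8'))
    print(out.decode('utf-8'))  # pipes yield bytes; decode before use
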
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/json_tools_test.py
----------------------------------------------------------------------
diff --git a/json_tools_test.py b/json_tools_test.py
index cdcb0f3..8065c16 100644
--- a/json_tools_test.py
+++ b/json_tools_test.py
@@ -1,28 +1,33 @@
 import os
 import tempfile
+import pytest
+import logging
 
-from dtest import Tester, debug, create_ks
+from dtest import Tester, create_ks
 from tools.data import rows_to_list
-from tools.decorators import since
+from tools.assertions import assert_lists_equal_ignoring_order
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
-@since('0', '2.2.X')
+
+@since('0', max_version='2.2.X')
 class TestJson(Tester):
 
-    def json_tools_test(self):
+    def test_json_tools(self):
 
-        debug("Starting cluster...")
+        logger.debug("Starting cluster...")
         cluster = self.cluster
         cluster.set_batch_commitlog(enabled=True)
         cluster.populate(1).start()
 
-        debug("Version: " + cluster.version().vstring)
+        logger.debug("Version: " + cluster.version().vstring)
 
-        debug("Getting CQLSH...")
+        logger.debug("Getting CQLSH...")
         [node1] = cluster.nodelist()
         session = self.patient_cql_connection(node1)
 
-        debug("Inserting data...")
+        logger.debug("Inserting data...")
         create_ks(session, 'Test', 1)
 
         session.execute("""
@@ -35,20 +40,21 @@ class TestJson(Tester):
             );
         """)
 
-        session.execute("INSERT INTO Test. users (user_name, password, gender, state, birth_year) VALUES('frodo', 'pass@', 'male', 'CA', 1985);")
-        session.execute("INSERT INTO Test. users (user_name, password, gender, state, birth_year) VALUES('sam', '@pass', 'male', 'NY', 1980);")
+        session.execute("INSERT INTO Test. users (user_name, password, gender, state, birth_year) "
+                        "VALUES ('frodo', 'pass@', 'male', 'CA', 1985);")
+        session.execute("INSERT INTO Test. users (user_name, password, gender, state, birth_year) "
+                        "VALUES ('sam', '@pass', 'male', 'NY', 1980);")
 
         res = session.execute("SELECT * FROM Test. users")
 
-        self.assertItemsEqual(rows_to_list(res),
-                              [[u'frodo', 1985, u'male', u'pass@', u'CA'],
-                               [u'sam', 1980, u'male', u'@pass', u'NY']])
+        assert_lists_equal_ignoring_order(rows_to_list(res), [['frodo', 1985, 'male', 'pass@', 'CA'],
+                                                              ['sam', 1980, 'male', '@pass', 'NY']])
 
-        debug("Flushing and stopping cluster...")
+        logger.debug("Flushing and stopping cluster...")
         node1.flush()
         cluster.stop()
 
-        debug("Exporting to JSON file...")
+        logger.debug("Exporting to JSON file...")
         json_path = tempfile.mktemp(suffix='.schema.json')
         with open(json_path, 'w') as f:
             node1.run_sstable2json(f)
@@ -59,11 +65,11 @@ class TestJson(Tester):
             with open(json_path, 'w') as fout:
                 fout.writelines(data[1:])
 
-        debug("Deleting cluster and creating new...")
+        logger.debug("Deleting cluster and creating new...")
         cluster.clear()
         cluster.start()
 
-        debug("Inserting data...")
+        logger.debug("Inserting data...")
         session = self.patient_cql_connection(node1)
         create_ks(session, 'Test', 1)
 
@@ -77,25 +83,25 @@ class TestJson(Tester):
             );
         """)
 
-        session.execute("INSERT INTO Test. users (user_name, password, gender, state, birth_year) VALUES('gandalf', 'p@$$', 'male', 'WA', 1955);")
+        session.execute("INSERT INTO Test. users (user_name, password, gender, state, birth_year) "
+                        "VALUES ('gandalf', 'p@$$', 'male', 'WA', 1955);")
         node1.flush()
         cluster.stop()
 
-        debug("Importing JSON file...")
+        logger.debug("Importing JSON file...")
         with open(json_path) as f:
             node1.run_json2sstable(f, "test", "users")
         os.remove(json_path)
 
-        debug("Verifying import...")
+        logger.debug("Verifying import...")
         cluster.start()
         [node1] = cluster.nodelist()
         session = self.patient_cql_connection(node1)
 
         res = session.execute("SELECT * FROM Test. users")
 
-        debug("data: " + str(res))
+        logger.debug("data: " + str(res))
 
-        self.assertItemsEqual(rows_to_list(res),
-                              [[u'frodo', 1985, u'male', u'pass@', u'CA'],
-                               [u'sam', 1980, u'male', u'@pass', u'NY'],
-                               [u'gandalf', 1955, u'male', u'p@$$', u'WA']])
+        assert_lists_equal_ignoring_order(rows_to_list(res), [['frodo', 1985, 'male', 'pass@', 'CA'],
+                                                              ['sam', 1980, 'male', '@pass', 'NY'],
+                                                              ['gandalf', 1955, 'male', 'p@$$', 'WA']])

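unittest's assertItemsEqual, used in the removed lines, compared two
sequences while ignoring order; a plain == does not, so the conversions
above route through the dtest helper assert_lists_equal_ignoring_order. A
self-contained sketch of the same idea (the helper name assert_same_items
is illustrative, not part of this commit):

    def assert_same_items(actual, expected):
        # Sort on repr so rows containing mixed types compare stably.
        assert sorted(actual, key=repr) == sorted(expected, key=repr)

    assert_same_items([[1, 'b'], [0, 'a']], [[0, 'a'], [1, 'b']])
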
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/largecolumn_test.py
----------------------------------------------------------------------
diff --git a/largecolumn_test.py b/largecolumn_test.py
index d9f3879..261ab75 100644
--- a/largecolumn_test.py
+++ b/largecolumn_test.py
@@ -1,5 +1,11 @@
-from dtest import Tester, debug
-from tools.decorators import since
+import pytest
+import re
+import logging
+
+from dtest import Tester
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 @since('2.2')
@@ -23,16 +29,16 @@ class TestLargeColumn(Tester):
                 return False
 
         output, err, _ = node.nodetool("gcstats")
-        debug(output)
+        logger.debug(output)
         output = output.split("\n")
-        self.assertRegexpMatches(output[0].strip(), 'Interval')
+        assert re.search('Interval', output[0].strip())
         fields = output[1].split()
-        self.assertGreaterEqual(len(fields), 6, "Expected output from nodetool gcstats has at least six fields. However, fields is: {}".format(fields))
+        assert len(fields) >= 6, "Expected output from nodetool gcstats to have at least six fields. However, fields is: {}".format(fields)
         for field in fields:
-            self.assertTrue(is_number(field.strip()) or field == 'NaN', "Expected numeric from fields from nodetool gcstats. However, field.strip() is: {}".format(field.strip()))
+            assert is_number(field.strip()) or field == 'NaN', "Expected numeric from fields from nodetool gcstats. However, field.strip() is: {}".format(field.strip())
         return fields[6]
 
-    def cleanup_test(self):
+    def test_cleanup(self):
         """
         @jira_ticket CASSANDRA-8670
         """
@@ -45,12 +51,12 @@ class TestLargeColumn(Tester):
         node1, node2 = cluster.nodelist()
 
         session = self.patient_cql_connection(node1)
-        debug("Before stress {0}".format(self.directbytes(node1)))
-        debug("Running stress")
+        logger.debug("Before stress {0}".format(self.directbytes(node1)))
+        logger.debug("Running stress")
         # Run the full stack to see how much memory is utilized for "small" columns
         self.stress_with_col_size(cluster, node1, 1)
         beforeStress = self.directbytes(node1)
-        debug("Ran stress once {0}".format(beforeStress))
+        logger.debug("Ran stress once {0}".format(beforeStress))
 
         # Now run the full stack to see how much memory is utilized for "large" columns
         LARGE_COLUMN_SIZE = 1024 * 1024 * 63
@@ -58,9 +64,9 @@ class TestLargeColumn(Tester):
 
         output, err, _ = node1.nodetool("gcstats")
         afterStress = self.directbytes(node1)
-        debug("After stress {0}".format(afterStress))
+        logger.debug("After stress {0}".format(afterStress))
 
         # Any growth in memory usage should not be proportional column size. Really almost no memory should be used
         # since Netty was instructed to use a heap allocator
         diff = int(afterStress) - int(beforeStress)
-        self.assertLess(diff, LARGE_COLUMN_SIZE)
+        assert diff < LARGE_COLUMN_SIZE


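As in the gcstats check above, assertRegexpMatches searched (rather than
fully matched) its input, so re.search is the faithful translation. A
minimal sketch with an illustrative gcstats-style header line:

    import re

    header = "Interval (ms)  Max GC Elapsed (ms)  Total GC Elapsed (ms)"
    # re.search finds the pattern anywhere in the string, as
    # assertRegexpMatches did; re.match would anchor at the start.
    assert re.search('Interval', header.strip())
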
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/batch_test.py
----------------------------------------------------------------------
diff --git a/batch_test.py b/batch_test.py
index f8b9881..7e7b6b2 100644
--- a/batch_test.py
+++ b/batch_test.py
@@ -1,22 +1,24 @@
 import sys
 import time
-from unittest import skipIf
-from nose.tools import assert_greater_equal
+import pytest
+import logging
 
 from cassandra import ConsistencyLevel, Timeout, Unavailable
 from cassandra.query import SimpleStatement
 
-from dtest import Tester, create_ks, debug
+from dtest import Tester, create_ks
 from tools.assertions import (assert_all, assert_invalid, assert_one,
                               assert_unavailable)
-from tools.decorators import since
 from tools.jmxutils import (JolokiaAgent, make_mbean,
                             remove_perf_disable_shared_mem)
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
 
 class TestBatch(Tester):
 
-    def empty_batch_throws_no_error_test(self):
+    def test_empty_batch_throws_no_error(self):
         """
         @jira_ticket CASSANDRA-10711
         """
@@ -26,9 +28,9 @@ class TestBatch(Tester):
             APPLY BATCH;
         """)
         for node in self.cluster.nodelist():
-            self.assertEquals(0, len(node.grep_log_for_errors()))
+            assert 0 == len(node.grep_log_for_errors())
 
-    def counter_batch_accepts_counter_mutations_test(self):
+    def test_counter_batch_accepts_counter_mutations(self):
         """ Test that counter batch accepts counter mutations """
         session = self.prepare()
         session.execute("""
@@ -40,7 +42,7 @@ class TestBatch(Tester):
         """)
         assert_all(session, "SELECT total FROM clicks", [[1], [1], [1]])
 
-    def counter_batch_rejects_regular_mutations_test(self):
+    def test_counter_batch_rejects_regular_mutations(self):
         """ Test that counter batch rejects non-counter mutations """
         session = self.prepare()
         err = "Cannot include non-counter statement in a counter batch"
@@ -54,7 +56,7 @@ class TestBatch(Tester):
             APPLY BATCH
             """, matching=err)
 
-    def logged_batch_accepts_regular_mutations_test(self):
+    def test_logged_batch_accepts_regular_mutations(self):
         """ Test that logged batch accepts regular mutations """
         session = self.prepare()
         session.execute("""
@@ -63,10 +65,10 @@ class TestBatch(Tester):
             INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')
             APPLY BATCH
         """)
-        assert_all(session, "SELECT * FROM users", [[1, u'Will', u'Turner'], [0, u'Jack', u'Sparrow']])
+        assert_all(session, "SELECT * FROM users", [[1, 'Will', 'Turner'], [0, 'Jack', 'Sparrow']])
 
     @since('3.0')
-    def logged_batch_gcgs_below_threshold_single_table_test(self):
+    def test_logged_batch_gcgs_below_threshold_single_table(self):
         """ Test that logged batch accepts regular mutations """
         session = self.prepare()
 
@@ -84,11 +86,11 @@ class TestBatch(Tester):
                                  "batchlog entries, so setting gc_grace_seconds too low on tables "
                                  "involved in an atomic batch might cause batchlog entries to expire "
                                  "before being replayed.")
-        debug(warning)
-        self.assertEquals(1, len(warning), "Cannot find the gc_grace_seconds warning message.")
+        logger.debug(warning)
+        assert 1 == len(warning), "Cannot find the gc_grace_seconds warning message."
 
     @since('3.0')
-    def logged_batch_gcgs_below_threshold_multi_table_test(self):
+    def test_logged_batch_gcgs_below_threshold_multi_table(self):
         """ Test that logged batch accepts regular mutations """
         session = self.prepare()
         session.execute("ALTER TABLE users WITH gc_grace_seconds = 0")
@@ -111,11 +113,11 @@ class TestBatch(Tester):
                                  "batchlog entries, so setting gc_grace_seconds too low on tables "
                                  "involved in an atomic batch might cause batchlog entries to expire "
                                  "before being replayed.")
-        debug(warning)
-        self.assertEquals(1, len(warning), "Cannot find the gc_grace_seconds warning message.")
+        logger.debug(warning)
+        assert 1 == len(warning), "Cannot find the gc_grace_seconds warning message."
 
     @since('3.0')
-    def unlogged_batch_gcgs_below_threshold_should_not_print_warning_test(self):
+    def test_unlogged_batch_gcgs_below_threshold_should_not_print_warning(self):
         """ Test that logged batch accepts regular mutations """
         session = self.prepare()
         session.execute("ALTER TABLE users WITH gc_grace_seconds = 0")
@@ -127,10 +129,10 @@ class TestBatch(Tester):
         """)
         node1 = self.cluster.nodelist()[0]
         warning = node1.grep_log("setting a too low gc_grace_seconds on tables involved in an atomic batch")
-        debug(warning)
-        self.assertEquals(0, len(warning), "Cannot find the gc_grace_seconds warning message.")
+        logger.debug(warning)
+        assert 0 == len(warning), "Found the gc_grace_seconds warning message, but none was expected."
 
-    def logged_batch_rejects_counter_mutations_test(self):
+    def test_logged_batch_rejects_counter_mutations(self):
         """ Test that logged batch rejects counter mutations """
         session = self.prepare()
         err = "Cannot include a counter statement in a logged batch"
@@ -143,7 +145,7 @@ class TestBatch(Tester):
             APPLY BATCH
             """, matching=err)
 
-    def unlogged_batch_accepts_regular_mutations_test(self):
+    def test_unlogged_batch_accepts_regular_mutations(self):
         """ Test that unlogged batch accepts regular mutations """
         session = self.prepare()
         session.execute("""
@@ -152,9 +154,9 @@ class TestBatch(Tester):
             INSERT INTO users (id, firstname, lastname) VALUES (2, 'Elizabeth', 'Swann')
             APPLY BATCH
         """)
-        assert_all(session, "SELECT * FROM users", [[0, u'Jack', u'Sparrow'], [2, u'Elizabeth', u'Swann']])
+        assert_all(session, "SELECT * FROM users", [[0, 'Jack', 'Sparrow'], [2, 'Elizabeth', 'Swann']])
 
-    def unlogged_batch_rejects_counter_mutations_test(self):
+    def test_unlogged_batch_rejects_counter_mutations(self):
         """ Test that unlogged batch rejects counter mutations """
         session = self.prepare()
         err = "Counter and non-counter mutations cannot exist in the same batch"
@@ -167,7 +169,7 @@ class TestBatch(Tester):
             APPLY BATCH
             """, matching=err)
 
-    def logged_batch_throws_uae_test(self):
+    def test_logged_batch_throws_uae(self):
         """ Test that logged batch throws UAE if there aren't enough live nodes """
         session = self.prepare(nodes=3)
         [node.stop(wait_other_notice=True) for node in self.cluster.nodelist()[1:]]
@@ -179,7 +181,7 @@ class TestBatch(Tester):
             APPLY BATCH
         """)
 
-    def logged_batch_doesnt_throw_uae_test(self):
+    def test_logged_batch_doesnt_throw_uae(self):
         """ Test that logged batch DOES NOT throw UAE if there are at least 2 live nodes """
         session = self.prepare(nodes=3)
         self.cluster.nodelist()[-1].stop(wait_other_notice=True)
@@ -192,10 +194,10 @@ class TestBatch(Tester):
         session.execute(query)
 
         self.cluster.nodelist()[-1].start(wait_for_binary_proto=True, wait_other_notice=True)
-        assert_all(session, "SELECT * FROM users", [[1, u'Will', u'Turner'], [0, u'Jack', u'Sparrow']],
+        assert_all(session, "SELECT * FROM users", [[1, 'Will', 'Turner'], [0, 'Jack', 'Sparrow']],
                    cl=ConsistencyLevel.ALL)
 
-    def acknowledged_by_batchlog_not_set_when_batchlog_write_fails_test(self):
+    def test_acknowledged_by_batchlog_not_set_when_batchlog_write_fails(self):
         """ Test that acknowledged_by_batchlog is False if batchlog can't be written """
         session = self.prepare(nodes=3, compression=False)
         # kill 2 of the 3 nodes (all the batchlog write candidates).
@@ -207,7 +209,7 @@ class TestBatch(Tester):
             APPLY BATCH
         """, ConsistencyLevel.ONE, received_responses=0)
 
-    def acknowledged_by_batchlog_set_when_batchlog_write_succeeds_test(self):
+    def test_acknowledged_by_batchlog_set_when_batchlog_write_succeeds(self):
         """ Test that acknowledged_by_batchlog is True if batchlog can be written """
         session = self.prepare(nodes=3, compression=False)
         # kill one of the nodes so that batchlog will be written, but the write will fail.
@@ -219,7 +221,7 @@ class TestBatch(Tester):
             APPLY BATCH
         """, ConsistencyLevel.THREE, received_responses=2)
 
-    def batch_uses_proper_timestamp_test(self):
+    def test_batch_uses_proper_timestamp(self):
         """ Test that each statement will be executed with provided BATCH timestamp """
         session = self.prepare()
         session.execute("""
@@ -231,7 +233,7 @@ class TestBatch(Tester):
         query = "SELECT id, writetime(firstname), writetime(lastname) FROM users"
         assert_all(session, query, [[1, 1111111111111111, 1111111111111111], [0, 1111111111111111, 1111111111111111]])
 
-    def only_one_timestamp_is_valid_test(self):
+    def test_only_one_timestamp_is_valid(self):
         """ Test that TIMESTAMP must not be used in the statements within the batch. """
         session = self.prepare()
         assert_invalid(session, """
@@ -241,7 +243,7 @@ class TestBatch(Tester):
             APPLY BATCH
         """, matching="Timestamp must be set either on BATCH or individual statements")
 
-    def each_statement_in_batch_uses_proper_timestamp_test(self):
+    def test_each_statement_in_batch_uses_proper_timestamp(self):
         """ Test that each statement will be executed with its own timestamp """
         session = self.prepare()
         session.execute("""
@@ -254,9 +256,8 @@ class TestBatch(Tester):
         query = "SELECT id, writetime(firstname), writetime(lastname) FROM users"
         assert_all(session, query, [[1, 1111111111111112, 1111111111111112], [0, 1111111111111111, 1111111111111111]])
 
-    def multi_table_batch_for_10554_test(self):
+    def test_multi_table_batch_for_10554(self):
         """ Test a batch on 2 tables having different columns, restarting the node afterwards, to reproduce CASSANDRA-10554 """
-
         session = self.prepare()
 
         # prepare() adds users and clicks but clicks is a counter table, so adding a random other table for this test.
@@ -289,7 +290,7 @@ class TestBatch(Tester):
         assert_one(session, "SELECT * FROM dogs", [0, 'Pluto'])
 
     @since('3.0', max_version='3.x')
-    def logged_batch_compatibility_1_test(self):
+    def test_logged_batch_compatibility_1(self):
         """
         @jira_ticket CASSANDRA-9673, test that logged batches still work with a mixed version cluster.
 
@@ -298,7 +299,7 @@ class TestBatch(Tester):
         self._logged_batch_compatibility_test(0, 1, 'github:apache/cassandra-2.2', 2, 4)
 
     @since('3.0', max_version='3.x')
-    def batchlog_replay_compatibility_1_test(self):
+    def test_batchlog_replay_compatibility_1(self):
         """
         @jira_ticket CASSANDRA-9673, test that logged batches still work with a mixed version cluster.
 
@@ -307,8 +308,8 @@ class TestBatch(Tester):
         self._batchlog_replay_compatibility_test(0, 1, 'github:apache/cassandra-2.2', 2, 4)
 
     @since('3.0', max_version='3.x')
-    @skipIf(sys.platform == 'win32', 'Windows production support only on 2.2+')
-    def logged_batch_compatibility_2_test(self):
+    @pytest.mark.skipif(sys.platform == 'win32', reason='Windows production support only on 2.2+')
+    def test_logged_batch_compatibility_2(self):
         """
         @jira_ticket CASSANDRA-9673, test that logged batches still work with a mixed version cluster.
 
@@ -317,8 +318,8 @@ class TestBatch(Tester):
         self._logged_batch_compatibility_test(0, 1, 'github:apache/cassandra-2.1', 2, 3)
 
     @since('3.0', max_version='3.x')
-    @skipIf(sys.platform == 'win32', 'Windows production support only on 2.2+')
-    def logged_batch_compatibility_3_test(self):
+    @pytest.mark.skipif(sys.platform == 'win32', reason='Windows production support only on 2.2+')
+    def test_logged_batch_compatibility_3(self):
         """
         @jira_ticket CASSANDRA-9673, test that logged batches still work with a mixed version cluster.
 
@@ -327,7 +328,7 @@ class TestBatch(Tester):
         self._logged_batch_compatibility_test(0, 2, 'github:apache/cassandra-2.1', 1, 3)
 
     @since('3.0', max_version='3.x')
-    def logged_batch_compatibility_4_test(self):
+    def test_logged_batch_compatibility_4(self):
         """
         @jira_ticket CASSANDRA-9673, test that logged batches still work with a mixed version cluster.
 
@@ -336,7 +337,7 @@ class TestBatch(Tester):
         self._logged_batch_compatibility_test(2, 2, 'github:apache/cassandra-2.2', 1, 4)
 
     @since('3.0', max_version='3.x')
-    def batchlog_replay_compatibility_4_test(self):
+    def test_batchlog_replay_compatibility_4(self):
         """
         @jira_ticket CASSANDRA-9673, test that logged batches still work with a mixed version cluster.
 
@@ -345,8 +346,8 @@ class TestBatch(Tester):
         self._batchlog_replay_compatibility_test(2, 2, 'github:apache/cassandra-2.2', 1, 4)
 
     @since('3.0', max_version='3.x')
-    @skipIf(sys.platform == 'win32', 'Windows production support only on 2.2+')
-    def logged_batch_compatibility_5_test(self):
+    @pytest.mark.skipif(sys.platform == 'win32', reason='Windows production support only on 2.2+')
+    def test_logged_batch_compatibility_5(self):
         """
         @jira_ticket CASSANDRA-9673, test that logged batches still work with a mixed version cluster.
 
@@ -365,7 +366,7 @@ class TestBatch(Tester):
         session.execute(query)
         rows = session.execute("SELECT id, firstname, lastname FROM users")
         res = sorted(rows)
-        self.assertEquals([[0, 'Jack', 'Sparrow'], [1, 'Will', 'Turner']], [list(res[0]), list(res[1])])
+        assert [[0, 'Jack', 'Sparrow'], [1, 'Will', 'Turner']] == [list(res[0]), list(res[1])]
 
     def _batchlog_replay_compatibility_test(self, coordinator_idx, current_nodes, previous_version, previous_nodes, protocol_version):
         session = self.prepare_mixed(coordinator_idx, current_nodes, previous_version, previous_nodes,
@@ -373,7 +374,7 @@ class TestBatch(Tester):
 
         coordinator = self.cluster.nodelist()[coordinator_idx]
         coordinator.byteman_submit(['./byteman/fail_after_batchlog_write.btm'])
-        debug("Injected byteman scripts to enable batchlog replay {}".format(coordinator.name))
+        logger.debug("Injected byteman scripts to enable batchlog replay {}".format(coordinator.name))
 
         query = """
             BEGIN BATCH
@@ -387,7 +388,7 @@ class TestBatch(Tester):
         # 2 * write_request_timeout_in_ms ms: 1x timeout for all mutations to be written,
         # and another 1x timeout for batch remove mutation to be received.
         delay = 2 * coordinator.get_conf_option('write_request_timeout_in_ms') / 1000.0 + 1
-        debug('Sleeping for {}s for the batches to not be skipped'.format(delay))
+        logger.debug('Sleeping for {}s for the batches to not be skipped'.format(delay))
         time.sleep(delay)
 
         total_batches_replayed = 0
@@ -398,18 +399,18 @@ class TestBatch(Tester):
                 continue
 
             with JolokiaAgent(n) as jmx:
-                debug('Forcing batchlog replay for {}'.format(n.name))
+                logger.debug('Forcing batchlog replay for {}'.format(n.name))
                 jmx.execute_method(blm, 'forceBatchlogReplay')
                 batches_replayed = jmx.read_attribute(blm, 'TotalBatchesReplayed')
-                debug('{} batches replayed on node {}'.format(batches_replayed, n.name))
+                logger.debug('{} batches replayed on node {}'.format(batches_replayed, n.name))
                 total_batches_replayed += batches_replayed
 
-        assert_greater_equal(total_batches_replayed, 2)
+        assert total_batches_replayed >= 2
 
         for node in self.cluster.nodelist():
             session = self.patient_exclusive_cql_connection(node, protocol_version=protocol_version)
             rows = sorted(session.execute('SELECT id, firstname, lastname FROM ks.users'))
-            self.assertEqual([[0, 'Jack', 'Sparrow'], [1, 'Will', 'Turner']], [list(rows[0]), list(rows[1])])
+            assert [[0, 'Jack', 'Sparrow'], [1, 'Will', 'Turner']] == [list(rows[0]), list(rows[1])]
 
     def assert_timedout(self, session, query, cl, acknowledged_by=None,
                         received_responses=None):
@@ -420,12 +421,12 @@ class TestBatch(Tester):
             if received_responses is not None:
                 msg = "Expecting received_responses to be {}, got: {}".format(
                     received_responses, e.received_responses,)
-                self.assertEqual(e.received_responses, received_responses, msg)
+                assert e.received_responses == received_responses, msg
         except Unavailable as e:
             if received_responses is not None:
                 msg = "Expecting alive_replicas to be {}, got: {}".format(
                     received_responses, e.alive_replicas,)
-                self.assertEqual(e.alive_replicas, received_responses, msg)
+                assert e.alive_replicas == received_responses, msg
         except Exception as e:
             assert False, "Expecting TimedOutException, got:" + str(e)
         else:
@@ -434,7 +435,7 @@ class TestBatch(Tester):
     def prepare(self, nodes=1, compression=True, version=None, protocol_version=None, install_byteman=False):
         if version:
             self.cluster.set_install_dir(version=version)
-            debug("Set cassandra dir to {}".format(self.cluster.get_install_dir()))
+            logger.debug("Set cassandra dir to {}".format(self.cluster.get_install_dir()))
 
         self.cluster.populate(nodes, install_byteman=install_byteman)
 
@@ -449,7 +450,7 @@ class TestBatch(Tester):
         return session
 
     def create_schema(self, session, rf):
-        debug('Creating schema...')
+        logger.debug('Creating schema...')
         create_ks(session, 'ks', rf)
 
         session.execute("""
@@ -472,19 +473,22 @@ class TestBatch(Tester):
 
         time.sleep(.5)
 
-    def prepare_mixed(self, coordinator_idx, current_nodes, previous_version, previous_nodes, compression=True, protocol_version=None, install_byteman=False):
-        debug("Testing with {} node(s) at version '{}', {} node(s) at current version"
+    def prepare_mixed(self, coordinator_idx, current_nodes, previous_version, previous_nodes, compression=True,
+                      protocol_version=None, install_byteman=False):
+        logger.debug("Testing with {} node(s) at version '{}', {} node(s) at current version"
               .format(previous_nodes, previous_version, current_nodes))
 
         # start a cluster using the previous version
-        self.prepare(previous_nodes + current_nodes, compression, previous_version, protocol_version=protocol_version, install_byteman=install_byteman)
+        self.prepare(previous_nodes + current_nodes, compression, previous_version, protocol_version=protocol_version,
+                     install_byteman=install_byteman)
 
         # then upgrade the current nodes to the current version but not the previous nodes
-        for i in xrange(current_nodes):
+        for i in range(current_nodes):
             node = self.cluster.nodelist()[i]
             self.upgrade_node(node)
 
-        session = self.patient_exclusive_cql_connection(self.cluster.nodelist()[coordinator_idx], protocol_version=protocol_version)
+        session = self.patient_exclusive_cql_connection(self.cluster.nodelist()[coordinator_idx],
+                                                        protocol_version=protocol_version)
         session.execute('USE ks')
         return session
 
@@ -492,13 +496,13 @@ class TestBatch(Tester):
         """
         Upgrade a node to the current version
         """
-        debug('Upgrading {} to the current version'.format(node.name))
-        debug('Shutting down {}'.format(node.name))
+        logger.debug('Upgrading {} to the current version'.format(node.name))
+        logger.debug('Shutting down {}'.format(node.name))
         node.stop(wait_other_notice=False)
         self.set_node_to_current_version(node)
-        debug("Set cassandra dir for {} to {}".format(node.name, node.get_install_dir()))
+        logger.debug("Set cassandra dir for {} to {}".format(node.name, node.get_install_dir()))
         # needed for jmx
         remove_perf_disable_shared_mem(node)
         # Restart nodes on new version
-        debug('Starting {} on new version ({})'.format(node.name, node.get_cassandra_version()))
+        logger.debug('Starting {} on new version ({})'.format(node.name, node.get_cassandra_version()))
         node.start(wait_other_notice=True, wait_for_binary_proto=True)

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/bin/collect_known_failures.py
----------------------------------------------------------------------
diff --git a/bin/collect_known_failures.py b/bin/collect_known_failures.py
deleted file mode 100644
index cf66cd1..0000000
--- a/bin/collect_known_failures.py
+++ /dev/null
@@ -1,58 +0,0 @@
-"""
-A script that runs the tests with --collect-only, but instead of just printing
-the tests' names, prints the information added by the tools.decorators.known_failure
-decorator.
-
-This is basically a wrapper around the `nosetests` command, so it takes the
-same arguments, though it appends some arguments to sys.argv. In particular,
-if you want to look at particular kinds of known failures, use the `-a`
-parameter on this script as you would for any of the known_failures attributes.
-In addition, you should call it from the same directory from which you'd call
-`nosetests`.
-"""
-
-import json
-import os
-import sys
-from functools import partial
-
-import nose
-
-
-class PrintJiraURLPlugin(nose.plugins.Plugin):
-    enabled = True
-
-    def options(self, parser, env):
-        super(PrintJiraURLPlugin, self).configure(parser, env)
-
-    def testName(self, test):
-        _, test_module, test_name = test.address()
-        test_method_name = test_name.split('.')[-1]
-        test_method = getattr(test.test, test_method_name)
-
-        get_attr_for_current_method = partial(
-            nose.plugins.attrib.get_method_attr,
-            method=test_method,
-            cls=test.test,
-        )
-
-        failure_annotations = get_attr_for_current_method(attr_name='known_failure')
-
-        return json.dumps({
-            'module': test_module,
-            'name': test_name,
-            'failure_annotations': failure_annotations
-        })
-
-
-if __name__ == '__main__':
-    argv = sys.argv + ['--collect-only', '-v']
-
-    # The tests need a CASSANDRA_VERSION or CASSANDRA_DIR environment variable
-    # to run at all, so we specify it here. However, we have to do so by
-    # modifying os.environ, rather than using the env parameter to nose.main,
-    # because env does not do what you think it does:
-    # http://stackoverflow.com/a/28611124
-    os.environ['CASSANDRA_VERSION'] = 'git:trunk'
-
-    nose.main(addplugins=[PrintJiraURLPlugin()], argv=argv)
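
Since this nose-based collection wrapper is removed, a similar listing could be
produced with pytest's own collection hooks. A rough, illustrative sketch --
assuming the annotations survive as a 'known_failure' marker, which this
migration does not itself provide:

    # conftest.py -- illustrative only, not part of this commit
    import json

    def pytest_collection_modifyitems(config, items):
        for item in items:
            marker = item.get_closest_marker('known_failure')
            if marker is not None:
                print(json.dumps({
                    'module': item.module.__name__,
                    'name': item.name,
                    'failure_annotations': marker.kwargs,
                }))

Run with 'pytest --collect-only -q' to list the tests without executing them.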

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/bootstrap_test.py
----------------------------------------------------------------------
diff --git a/bootstrap_test.py b/bootstrap_test.py
index efa84ec..22dddcd 100644
--- a/bootstrap_test.py
+++ b/bootstrap_test.py
@@ -5,34 +5,40 @@ import shutil
 import tempfile
 import threading
 import time
+import logging
+import signal
 
 from cassandra import ConsistencyLevel
 from cassandra.concurrent import execute_concurrent_with_args
 from ccmlib.node import NodeError
 
-from dtest import DISABLE_VNODES, Tester, debug, create_ks, create_cf
+import pytest
+
+from dtest import Tester, create_ks, create_cf
 from tools.assertions import (assert_almost_equal, assert_bootstrap_state, assert_not_running,
                               assert_one, assert_stderr_clean)
 from tools.data import query_c1c2
-from tools.decorators import no_vnodes, since
 from tools.intervention import InterruptBootstrap, KillOnBootstrap
 from tools.misc import new_node
-from tools.misc import generate_ssl_stores
-
-
-class BaseBootstrapTest(Tester):
-    __test__ = False
-
-    allow_log_errors = True
-    ignore_log_patterns = (
-        # This one occurs when trying to send the migration to a
-        # node that hasn't started yet, and when it does, it gets
-        # replayed and everything is fine.
-        r'Can\'t send migration request: node.*is down',
-        # ignore streaming error during bootstrap
-        r'Exception encountered during startup',
-        r'Streaming error occurred'
-    )
+from tools.misc import generate_ssl_stores, retry_till_success
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
+class TestBootstrap(Tester):
+
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        fixture_dtest_setup.allow_log_errors = True
+        fixture_dtest_setup.ignore_log_patterns = (
+            # This one occurs when trying to send the migration to a
+            # node that hasn't started yet, and when it does, it gets
+            # replayed and everything is fine.
+            r'Can\'t send migration request: node.*is down',
+            # ignore streaming error during bootstrap
+            r'Exception encountered during startup',
+            r'Streaming error occurred'
+        )
 
     def _base_bootstrap_test(self, bootstrap=None, bootstrap_from_version=None,
                              enable_ssl=None):
@@ -48,14 +54,14 @@ class BaseBootstrapTest(Tester):
         cluster = self.cluster
 
         if enable_ssl:
-            debug("***using internode ssl***")
-            generate_ssl_stores(self.test_path)
-            cluster.enable_internode_ssl(self.test_path)
+            logger.debug("***using internode ssl***")
+            generate_ssl_stores(self.fixture_dtest_setup.test_path)
+            cluster.enable_internode_ssl(self.fixture_dtest_setup.test_path)
 
         tokens = cluster.balanced_tokens(2)
         cluster.set_configuration_options(values={'num_tokens': 1})
 
-        debug("[node1, node2] tokens: %r" % (tokens,))
+        logger.debug("[node1, node2] tokens: %r" % (tokens,))
 
         keys = 10000
 
@@ -63,7 +69,7 @@ class BaseBootstrapTest(Tester):
         cluster.populate(1)
         node1 = cluster.nodelist()[0]
         if bootstrap_from_version:
-            debug("starting source node on version {}".format(bootstrap_from_version))
+            logger.debug("starting source node on version {}".format(bootstrap_from_version))
             node1.set_install_dir(version=bootstrap_from_version)
         node1.set_configuration_options(values={'initial_token': tokens[0]})
         cluster.start(wait_other_notice=True)
@@ -74,7 +80,7 @@ class BaseBootstrapTest(Tester):
 
         # record the size before inserting any of our own data
         empty_size = node1.data_size()
-        debug("node1 empty size : %s" % float(empty_size))
+        logger.debug("node1 empty size : %s" % float(empty_size))
 
         insert_statement = session.prepare("INSERT INTO ks.cf (key, c1, c2) VALUES (?, 'value1', 'value2')")
         execute_concurrent_with_args(session, insert_statement, [['k%d' % k] for k in range(keys)])
@@ -82,25 +88,23 @@ class BaseBootstrapTest(Tester):
         node1.flush()
         node1.compact()
         initial_size = node1.data_size()
-        debug("node1 size before bootstrapping node2: %s" % float(initial_size))
+        logger.debug("node1 size before bootstrapping node2: %s" % float(initial_size))
 
         # Reads inserted data all during the bootstrap process. We shouldn't
         # get any error
-        reader = self.go(lambda _: query_c1c2(session, random.randint(0, keys - 1), ConsistencyLevel.ONE))
+        query_c1c2(session, random.randint(0, keys - 1), ConsistencyLevel.ONE)
+        session.shutdown()
 
         # Bootstrapping a new node in the current version
         node2 = bootstrap(cluster, tokens[1])
         node2.compact()
 
-        reader.check()
         node1.cleanup()
-        debug("node1 size after cleanup: %s" % float(node1.data_size()))
+        logger.debug("node1 size after cleanup: %s" % float(node1.data_size()))
         node1.compact()
-        debug("node1 size after compacting: %s" % float(node1.data_size()))
-        time.sleep(.5)
-        reader.check()
+        logger.debug("node1 size after compacting: %s" % float(node1.data_size()))
 
-        debug("node2 size after compacting: %s" % float(node2.data_size()))
+        logger.debug("node2 size after compacting: %s" % float(node2.data_size()))
 
         size1 = float(node1.data_size())
         size2 = float(node2.data_size())
@@ -108,40 +112,34 @@ class BaseBootstrapTest(Tester):
         assert_almost_equal(float(initial_size - empty_size), 2 * (size1 - float(empty_size)))
 
         assert_bootstrap_state(self, node2, 'COMPLETED')
-        if bootstrap_from_version:
-            self.assertTrue(node2.grep_log('does not support keep-alive', filename='debug.log'))
 
-
-class TestBootstrap(BaseBootstrapTest):
-    __test__ = True
-
-    @no_vnodes()
-    def simple_bootstrap_test_with_ssl(self):
+    @pytest.mark.no_vnodes
+    def test_simple_bootstrap_with_ssl(self):
         self._base_bootstrap_test(enable_ssl=True)
 
-    @no_vnodes()
-    def simple_bootstrap_test(self):
+    @pytest.mark.no_vnodes
+    def test_simple_bootstrap(self):
         self._base_bootstrap_test()
 
-    @no_vnodes()
-    def bootstrap_on_write_survey_test(self):
+    @pytest.mark.no_vnodes
+    def test_bootstrap_on_write_survey(self):
         def bootstrap_on_write_survey_and_join(cluster, token):
             node2 = new_node(cluster)
             node2.set_configuration_options(values={'initial_token': token})
             node2.start(jvm_args=["-Dcassandra.write_survey=true"], wait_for_binary_proto=True)
 
-            self.assertTrue(len(node2.grep_log('Startup complete, but write survey mode is active, not becoming an active ring member.')))
+            assert len(node2.grep_log('Startup complete, but write survey mode is active, not becoming an active ring member.'))
             assert_bootstrap_state(self, node2, 'IN_PROGRESS')
 
             node2.nodetool("join")
-            self.assertTrue(len(node2.grep_log('Leaving write survey mode and joining ring at operator request')))
+            assert len(node2.grep_log('Leaving write survey mode and joining ring at operator request'))
             return node2
 
         self._base_bootstrap_test(bootstrap_on_write_survey_and_join)
 
     @since('3.10')
-    @no_vnodes()
-    def simple_bootstrap_test_small_keepalive_period(self):
+    @pytest.mark.no_vnodes
+    def test_simple_bootstrap_small_keepalive_period(self):
         """
         @jira_ticket CASSANDRA-11841
         Test that bootstrap completes if it takes longer than streaming_socket_timeout_in_ms or
@@ -157,7 +155,7 @@ class TestBootstrap(BaseBootstrapTest):
         cluster.populate(1)
         node1 = cluster.nodelist()[0]
 
-        debug("Setting up byteman on {}".format(node1.name))
+        logger.debug("Setting up byteman on {}".format(node1.name))
         # set up byteman
         node1.byteman_port = '8100'
         node1.import_config_files()
@@ -169,7 +167,7 @@ class TestBootstrap(BaseBootstrapTest):
                       'compaction(strategy=SizeTieredCompactionStrategy, enabled=false)'])
         cluster.flush()
 
-        debug("Submitting byteman script to {} to".format(node1.name))
+        logger.debug("Submitting byteman script to {} to".format(node1.name))
         # Sleep longer than streaming_socket_timeout_in_ms to make sure the node will not be killed
         node1.byteman_submit(['./byteman/stream_5s_sleep.btm'])
 
@@ -181,16 +179,15 @@ class TestBootstrap(BaseBootstrapTest):
         assert_bootstrap_state(self, node2, 'COMPLETED')
 
         for node in cluster.nodelist():
-            self.assertTrue(node.grep_log('Scheduling keep-alive task with 2s period.', filename='debug.log'))
-            self.assertTrue(node.grep_log('Sending keep-alive', filename='debug.log'))
-            self.assertTrue(node.grep_log('Received keep-alive', filename='debug.log'))
+            assert node.grep_log('Scheduling keep-alive task with 2s period.', filename='debug.log')
+            assert node.grep_log('Sending keep-alive', filename='debug.log')
+            assert node.grep_log('Received keep-alive', filename='debug.log')
 
-    def simple_bootstrap_test_nodata(self):
+    def test_simple_bootstrap_nodata(self):
         """
         @jira_ticket CASSANDRA-11010
         Test that bootstrap completes if streaming from nodes with no data
         """
-
         cluster = self.cluster
         # Create a two-node cluster
         cluster.populate(2)
@@ -202,7 +199,7 @@ class TestBootstrap(BaseBootstrapTest):
 
         assert_bootstrap_state(self, node3, 'COMPLETED')
 
-    def read_from_bootstrapped_node_test(self):
+    def test_read_from_bootstrapped_node(self):
         """
         Test bootstrapped node sees existing data
         @jira_ticket CASSANDRA-6648
@@ -223,18 +220,18 @@ class TestBootstrap(BaseBootstrapTest):
 
         session = self.patient_exclusive_cql_connection(node4)
         new_rows = list(session.execute("SELECT * FROM %s" % (stress_table,)))
-        self.assertEquals(original_rows, new_rows)
+        assert original_rows == new_rows
 
-    def consistent_range_movement_true_with_replica_down_should_fail_test(self):
+    def test_consistent_range_movement_true_with_replica_down_should_fail(self):
         self._bootstrap_test_with_replica_down(True)
 
-    def consistent_range_movement_false_with_replica_down_should_succeed_test(self):
+    def test_consistent_range_movement_false_with_replica_down_should_succeed(self):
         self._bootstrap_test_with_replica_down(False)
 
-    def consistent_range_movement_true_with_rf1_should_fail_test(self):
+    def test_consistent_range_movement_true_with_rf1_should_fail(self):
         self._bootstrap_test_with_replica_down(True, rf=1)
 
-    def consistent_range_movement_false_with_rf1_should_succeed_test(self):
+    def test_consistent_range_movement_false_with_rf1_should_succeed(self):
         self._bootstrap_test_with_replica_down(False, rf=1)
 
     def _bootstrap_test_with_replica_down(self, consistent_range_movement, rf=2):
@@ -249,10 +246,10 @@ class TestBootstrap(BaseBootstrapTest):
 
         node3_token = None
         # Make token assignment deterministic
-        if DISABLE_VNODES:
+        if not self.dtest_config.use_vnodes:
             cluster.set_configuration_options(values={'num_tokens': 1})
             tokens = cluster.balanced_tokens(3)
-            debug("non-vnode tokens: %r" % (tokens,))
+            logger.debug("non-vnode tokens: %r" % (tokens,))
             node1.set_configuration_options(values={'initial_token': tokens[0]})
             node2.set_configuration_options(values={'initial_token': tokens[2]})
             node3_token = tokens[1]  # Add node 3 between node1 and node2
@@ -283,7 +280,7 @@ class TestBootstrap(BaseBootstrapTest):
             # with rf=1 and cassandra.consistent.rangemovement=false, missing sources are ignored
             if not consistent_range_movement and rf == 1:
                 node3.watch_log_for("Unable to find sufficient sources for streaming range")
-            self.assertTrue(node3.is_running())
+            assert node3.is_running()
             assert_bootstrap_state(self, node3, 'COMPLETED')
         else:
             if consistent_range_movement:
@@ -293,11 +290,10 @@ class TestBootstrap(BaseBootstrapTest):
             assert_not_running(node3)
 
     @since('2.2')
-    def resumable_bootstrap_test(self):
+    def test_resumable_bootstrap(self):
         """
         Test resuming bootstrap after data streaming failure
         """
-
         cluster = self.cluster
         cluster.populate(2)
 
@@ -323,7 +319,7 @@ class TestBootstrap(BaseBootstrapTest):
         node3.watch_log_for("Starting listening for CQL clients")
         mark = node3.mark_log()
         # check if node3 is still in bootstrap mode
-        assert_bootstrap_state(self, node3, 'IN_PROGRESS')
+        retry_till_success(assert_bootstrap_state, tester=self, node=node3, expected_bootstrap_state='IN_PROGRESS', timeout=120)
 
         # bring back node1 and invoke nodetool bootstrap to resume bootstrapping
         node3.nodetool('bootstrap resume')
@@ -334,17 +330,16 @@ class TestBootstrap(BaseBootstrapTest):
         # cleanup to guarantee each node will only have sstables of its ranges
         cluster.cleanup()
 
-        debug("Check data is present")
+        logger.debug("Check data is present")
         # Let's check stream bootstrap completely transferred data
         stdout, stderr, _ = node3.stress(['read', 'n=1k', 'no-warmup', '-schema', 'replication(factor=2)', '-rate', 'threads=8'])
 
         if stdout is not None:
-            self.assertNotIn("FAILURE", stdout)
+            assert "FAILURE" not in stdout.decode("utf-8")
 
     @since('2.2')
-    def bootstrap_with_reset_bootstrap_state_test(self):
+    def test_bootstrap_with_reset_bootstrap_state(self):
         """Test bootstrap with resetting bootstrap progress"""
-
         cluster = self.cluster
         cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
         cluster.populate(2).start(wait_other_notice=True)
@@ -367,7 +362,7 @@ class TestBootstrap(BaseBootstrapTest):
         node1.start()
 
         # restart node3 bootstrap with resetting bootstrap progress
-        node3.stop()
+        node3.stop(signal_event=signal.SIGKILL)
         mark = node3.mark_log()
         node3.start(jvm_args=["-Dcassandra.reset_bootstrap_progress=true"])
         # check if we reset bootstrap state
@@ -378,7 +373,7 @@ class TestBootstrap(BaseBootstrapTest):
         # check if 2nd bootstrap succeeded
         assert_bootstrap_state(self, node3, 'COMPLETED')
 
-    def manual_bootstrap_test(self):
+    def test_manual_bootstrap(self):
         """
             Test adding a new node and bootstrapping it manually. No auto_bootstrap.
             This test also verifies that all data are OK after the addition of the new node.
@@ -403,14 +398,13 @@ class TestBootstrap(BaseBootstrapTest):
         node1.cleanup()
 
         current_rows = list(session.execute("SELECT * FROM %s" % stress_table))
-        self.assertEquals(original_rows, current_rows)
+        assert original_rows == current_rows
 
-    def local_quorum_bootstrap_test(self):
+    def test_local_quorum_bootstrap(self):
         """
         Test that CL local_quorum works while a node is bootstrapping.
         @jira_ticket CASSANDRA-8058
         """
-
         cluster = self.cluster
         cluster.populate([1, 1])
         cluster.start()
@@ -453,16 +447,16 @@ class TestBootstrap(BaseBootstrapTest):
                                         '-rate', 'threads=5',
                                         '-errors', 'retries=2'])
 
-        debug(out)
+        logger.debug(out)
         assert_stderr_clean(err)
         regex = re.compile("Operation.+error inserting key.+Exception")
-        failure = regex.search(out)
-        self.assertIsNone(failure, "Error during stress while bootstrapping")
+        failure = regex.search(str(out))
+        assert failure is None, "Error during stress while bootstrapping"
 
-    def shutdown_wiped_node_cannot_join_test(self):
+    def test_shutdown_wiped_node_cannot_join(self):
         self._wiped_node_cannot_join_test(gently=True)
 
-    def killed_wiped_node_cannot_join_test(self):
+    def test_killed_wiped_node_cannot_join(self):
         self._wiped_node_cannot_join_test(gently=False)
 
     def _wiped_node_cannot_join_test(self, gently):
@@ -490,7 +484,7 @@ class TestBootstrap(BaseBootstrapTest):
         node4.start(wait_for_binary_proto=True)
 
         session = self.patient_cql_connection(node4)
-        self.assertEquals(original_rows, list(session.execute("SELECT * FROM {}".format(stress_table,))))
+        assert original_rows == list(session.execute("SELECT * FROM {}".format(stress_table,)))
 
         # Stop the new node and wipe its data
         node4.stop(gently=gently)
@@ -500,7 +494,7 @@ class TestBootstrap(BaseBootstrapTest):
         node4.start(no_wait=True, wait_other_notice=False)
         node4.watch_log_for("A node with address {} already exists, cancelling join".format(node4.address_for_current_version_slashy()), from_mark=mark)
 
-    def decommissioned_wiped_node_can_join_test(self):
+    def test_decommissioned_wiped_node_can_join(self):
         """
         @jira_ticket CASSANDRA-9765
         Test that if we decommission a node and then wipe its data, it can join the cluster.
@@ -523,7 +517,7 @@ class TestBootstrap(BaseBootstrapTest):
         node4.start(wait_for_binary_proto=True, wait_other_notice=True)
 
         session = self.patient_cql_connection(node4)
-        self.assertEquals(original_rows, list(session.execute("SELECT * FROM {}".format(stress_table,))))
+        assert original_rows == list(session.execute("SELECT * FROM {}".format(stress_table,)))
 
         # Decommission the new node and wipe its data
         node4.decommission()
@@ -534,7 +528,7 @@ class TestBootstrap(BaseBootstrapTest):
         node4.start(wait_other_notice=True)
         node4.watch_log_for("JOINING:", from_mark=mark)
 
-    def decommissioned_wiped_node_can_gossip_to_single_seed_test(self):
+    def test_decommissioned_wiped_node_can_gossip_to_single_seed(self):
         """
         @jira_ticket CASSANDRA-8072
         @jira_ticket CASSANDRA-8422
@@ -559,26 +553,26 @@ class TestBootstrap(BaseBootstrapTest):
         session.execute("ALTER KEYSPACE system_traces WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'};")
 
         # Decommission the new node and kill it
-        debug("Decommissioning & stopping node2")
+        logger.debug("Decommissioning & stopping node2")
         node2.decommission()
         node2.stop(wait_other_notice=False)
 
         # Wipe its data
         for data_dir in node2.data_directories():
-            debug("Deleting {}".format(data_dir))
+            logger.debug("Deleting {}".format(data_dir))
             shutil.rmtree(data_dir)
 
         commitlog_dir = os.path.join(node2.get_path(), 'commitlogs')
-        debug("Deleting {}".format(commitlog_dir))
+        logger.debug("Deleting {}".format(commitlog_dir))
         shutil.rmtree(commitlog_dir)
 
         # Now start it, it should be allowed to join
         mark = node2.mark_log()
-        debug("Restarting wiped node2")
+        logger.debug("Restarting wiped node2")
         node2.start(wait_other_notice=False)
         node2.watch_log_for("JOINING:", from_mark=mark)
 
-    def failed_bootstrap_wiped_node_can_join_test(self):
+    def test_failed_bootstrap_wiped_node_can_join(self):
         """
         @jira_ticket CASSANDRA-9765
         Test that if a node fails to bootstrap, it can join the cluster even if the data is wiped.
@@ -607,7 +601,7 @@ class TestBootstrap(BaseBootstrapTest):
 
         node2.start()
         t.join()
-        self.assertFalse(node2.is_running())
+        assert not node2.is_running()
 
         # wipe any data for node2
         self._cleanup(node2)
@@ -617,7 +611,7 @@ class TestBootstrap(BaseBootstrapTest):
         node2.watch_log_for("JOINING:", from_mark=mark)
 
     @since('2.1.1')
-    def simultaneous_bootstrap_test(self):
+    def test_simultaneous_bootstrap(self):
         """
         Attempt to bootstrap two nodes at once, to assert the second bootstrapped node fails, and does not interfere.
 
@@ -660,7 +654,7 @@ class TestBootstrap(BaseBootstrapTest):
         # Repeat the select count(*) query, to help catch
         # bugs like 9484, where count(*) fails at higher
         # data loads.
-        for _ in xrange(5):
+        for _ in range(5):
             assert_one(session, "SELECT count(*) from keyspace1.standard1", [500000], cl=ConsistencyLevel.ONE)
 
     def test_cleanup(self):
@@ -673,7 +667,7 @@ class TestBootstrap(BaseBootstrapTest):
         cluster.populate(1)
         cluster.start(wait_for_binary_proto=True)
         node1, = cluster.nodelist()
-        for x in xrange(0, 5):
+        for x in range(0, 5):
             node1.stress(['write', 'n=100k', 'no-warmup', '-schema', 'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)', 'replication(factor=1)', '-rate', 'threads=10'])
             node1.flush()
         node2 = new_node(cluster)
@@ -682,20 +676,21 @@ class TestBootstrap(BaseBootstrapTest):
         failed = threading.Event()
         jobs = 1
         thread = threading.Thread(target=self._monitor_datadir, args=(node1, event, len(node1.get_sstables("keyspace1", "standard1")), jobs, failed))
+        thread.setDaemon(True)
         thread.start()
         node1.nodetool("cleanup -j {} keyspace1 standard1".format(jobs))
         event.set()
         thread.join()
-        self.assertFalse(failed.is_set())
+        assert not failed.is_set()
 
     def _monitor_datadir(self, node, event, basecount, jobs, failed):
         while True:
             sstables = [s for s in node.get_sstables("keyspace1", "standard1") if "tmplink" not in s]
-            debug("---")
+            logger.debug("---")
             for sstable in sstables:
-                debug(sstable)
+                logger.debug(sstable)
             if len(sstables) > basecount + jobs:
-                debug("Current count is {}, basecount was {}".format(len(sstables), basecount))
+                logger.debug("Current count is {}, basecount was {}".format(len(sstables), basecount))
                 failed.set()
                 return
             if event.is_set():
@@ -705,6 +700,6 @@ class TestBootstrap(BaseBootstrapTest):
     def _cleanup(self, node):
         commitlog_dir = os.path.join(node.get_path(), 'commitlogs')
         for data_dir in node.data_directories():
-            debug("Deleting {}".format(data_dir))
+            logger.debug("Deleting {}".format(data_dir))
             shutil.rmtree(data_dir)
         shutil.rmtree(commitlog_dir)
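
One helper worth noting: retry_till_success, newly imported from tools.misc
and used above to poll assert_bootstrap_state for up to 120 seconds. Its
implementation is not part of this diff; a minimal sketch of what such a
helper could look like (the timeout handling and sleep interval here are
assumptions, not the actual tools.misc code):

    import time

    def retry_till_success(fun, *args, **kwargs):
        # 'timeout' is consumed here; all other kwargs pass through to fun
        timeout = kwargs.pop('timeout', 60)
        deadline = time.time() + timeout
        while True:
            try:
                return fun(*args, **kwargs)
            except Exception:
                if time.time() >= deadline:
                    raise
                time.sleep(0.5)  # brief pause before the next attempt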


http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/scrub_test.py
----------------------------------------------------------------------
diff --git a/scrub_test.py b/scrub_test.py
index 4f9d22b..2df11c7 100644
--- a/scrub_test.py
+++ b/scrub_test.py
@@ -4,28 +4,33 @@ import re
 import subprocess
 import time
 import uuid
-
+import pytest
 import parse
+import logging
+
 from ccmlib import common
 
-from dtest import Tester, debug, create_ks, create_cf
+from dtest import Tester, create_ks, create_cf
 from tools.assertions import assert_length_equal, assert_stderr_clean
-from tools.decorators import since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 KEYSPACE = 'ks'
 
 
 class TestHelper(Tester):
 
-    def setUp(self):
+    @pytest.fixture(scope='function', autouse=True)
+    def fixture_set_cluster_settings(self, fixture_dtest_setup):
         """
         Disable JBOD configuration for scrub tests.
         Range-aware JBOD can skip generations in SSTables,
         and some tests rely on generation numbers
         (see CASSANDRA-11693 and increase_sstable_generations).
         """
-        super(TestHelper, self).setUp()
-        self.cluster.set_datadir_count(1)
+        fixture_dtest_setup.cluster.set_datadir_count(1)
+
 
     def get_table_paths(self, table):
         """
@@ -56,13 +61,13 @@ class TestHelper(Tester):
         Return the sstable files at a specific location
         """
         ret = []
-        debug('Checking sstables in {}'.format(paths))
+        logger.debug('Checking sstables in {}'.format(paths))
 
         for ext in ('*.db', '*.txt', '*.adler32', '*.sha1'):
             for path in paths:
                 for fname in glob.glob(os.path.join(path, ext)):
                     bname = os.path.basename(fname)
-                    debug('Found sstable file {}'.format(bname))
+                    logger.debug('Found sstable file {}'.format(bname))
                     ret.append(bname)
         return ret
 
@@ -77,7 +82,7 @@ class TestHelper(Tester):
                 for path in paths:
                     fullname = os.path.join(path, fname)
                     if (os.path.exists(fullname)):
-                        debug('Deleting {}'.format(fullname))
+                        logger.debug('Deleting {}'.format(fullname))
                         os.remove(fullname)
 
     def get_sstables(self, table, indexes):
@@ -86,12 +91,12 @@ class TestHelper(Tester):
         """
         sstables = {}
         table_sstables = self.get_sstable_files(self.get_table_paths(table))
-        self.assertGreater(len(table_sstables), 0)
+        assert len(table_sstables) > 0
         sstables[table] = sorted(table_sstables)
 
         for index in indexes:
             index_sstables = self.get_sstable_files(self.get_index_paths(table, index))
-            self.assertGreater(len(index_sstables), 0)
+            assert len(index_sstables) > 0
             sstables[index] = sorted('{}/{}'.format(index, sstable) for sstable in index_sstables)
 
         return sstables
@@ -112,16 +117,16 @@ class TestHelper(Tester):
         node1 = self.cluster.nodelist()[0]
         env = common.make_cassandra_env(node1.get_install_cassandra_root(), node1.get_node_cassandra_root())
         scrub_bin = node1.get_tool('sstablescrub')
-        debug(scrub_bin)
+        logger.debug(scrub_bin)
 
         args = [scrub_bin, ks, cf]
         p = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         out, err = p.communicate()
-        debug(out)
+        logger.debug(out.decode("utf-8"))
         # if we have less than 64G free space, we get this warning - ignore it
-        if err and "Consider adding more capacity" not in err:
-            debug(err)
-            assert_stderr_clean(err)
+        if err and "Consider adding more capacity" not in err.decode("utf-8"):
+            logger.debug(err.decode("utf-8"))
+            assert_stderr_clean(err.decode("utf-8"))
 
     def perform_node_tool_cmd(self, cmd, table, indexes):
         """
@@ -169,11 +174,11 @@ class TestHelper(Tester):
         After finding the number of existing sstables, increase all of the
         generations by that amount.
         """
-        for table_or_index, table_sstables in sstables.items():
+        for table_or_index, table_sstables in list(sstables.items()):
             increment_by = len(set(parse.search('{}-{increment_by}-{suffix}.{file_extention}', s).named['increment_by'] for s in table_sstables))
             sstables[table_or_index] = [self.increment_generation_by(s, increment_by) for s in table_sstables]
 
-        debug('sstables after increment {}'.format(str(sstables)))
+        logger.debug('sstables after increment {}'.format(str(sstables)))
 
 
 @since('2.2')
@@ -228,18 +233,18 @@ class TestScrubIndexes(TestHelper):
         scrubbed_sstables = self.scrub('users', 'gender_idx', 'state_idx', 'birth_year_idx')
 
         self.increase_sstable_generations(initial_sstables)
-        self.assertEqual(initial_sstables, scrubbed_sstables)
+        assert initial_sstables == scrubbed_sstables
 
         users = self.query_users(session)
-        self.assertEqual(initial_users, users)
+        assert initial_users == users
 
         # Scrub and check sstables and data again
         scrubbed_sstables = self.scrub('users', 'gender_idx', 'state_idx', 'birth_year_idx')
         self.increase_sstable_generations(initial_sstables)
-        self.assertEqual(initial_sstables, scrubbed_sstables)
+        assert initial_sstables == scrubbed_sstables
 
         users = self.query_users(session)
-        self.assertEqual(initial_users, users)
+        assert initial_users == users
 
         # Restart and check data again
         cluster.stop()
@@ -249,7 +254,7 @@ class TestScrubIndexes(TestHelper):
         session.execute('USE {}'.format(KEYSPACE))
 
         users = self.query_users(session)
-        self.assertEqual(initial_users, users)
+        assert initial_users == users
 
     def test_standalone_scrub(self):
         cluster = self.cluster
@@ -269,14 +274,14 @@ class TestScrubIndexes(TestHelper):
 
         scrubbed_sstables = self.standalonescrub('users', 'gender_idx', 'state_idx', 'birth_year_idx')
         self.increase_sstable_generations(initial_sstables)
-        self.assertEqual(initial_sstables, scrubbed_sstables)
+        assert initial_sstables == scrubbed_sstables
 
         cluster.start()
         session = self.patient_cql_connection(node1)
         session.execute('USE {}'.format(KEYSPACE))
 
         users = self.query_users(session)
-        self.assertEqual(initial_users, users)
+        assert initial_users == users
 
     def test_scrub_collections_table(self):
         cluster = self.cluster
@@ -297,25 +302,25 @@ class TestScrubIndexes(TestHelper):
             session.execute(("UPDATE users set uuids = [{id}] where user_id = {user_id}").format(id=_id, user_id=user_uuid))
 
         initial_users = list(session.execute(("SELECT * from users where uuids contains {some_uuid}").format(some_uuid=_id)))
-        self.assertEqual(num_users, len(initial_users))
+        assert num_users == len(initial_users)
 
         initial_sstables = self.flush('users', 'user_uuids_idx')
         scrubbed_sstables = self.scrub('users', 'user_uuids_idx')
 
         self.increase_sstable_generations(initial_sstables)
-        self.assertEqual(initial_sstables, scrubbed_sstables)
+        assert initial_sstables == scrubbed_sstables
 
         users = list(session.execute(("SELECT * from users where uuids contains {some_uuid}").format(some_uuid=_id)))
-        self.assertEqual(initial_users, users)
+        assert initial_users == users
 
         scrubbed_sstables = self.scrub('users', 'user_uuids_idx')
 
         self.increase_sstable_generations(initial_sstables)
-        self.assertEqual(initial_sstables, scrubbed_sstables)
+        assert initial_sstables == scrubbed_sstables
 
         users = list(session.execute(("SELECT * from users where uuids contains {some_uuid}").format(some_uuid=_id)))
 
-        self.assertListEqual(initial_users, users)
+        assert initial_users == users
 
 
 class TestScrub(TestHelper):
@@ -365,18 +370,18 @@ class TestScrub(TestHelper):
         scrubbed_sstables = self.scrub('users')
 
         self.increase_sstable_generations(initial_sstables)
-        self.assertEqual(initial_sstables, scrubbed_sstables)
+        assert initial_sstables == scrubbed_sstables
 
         users = self.query_users(session)
-        self.assertEqual(initial_users, users)
+        assert initial_users == users
 
         # Scrub and check sstables and data again
         scrubbed_sstables = self.scrub('users')
         self.increase_sstable_generations(initial_sstables)
-        self.assertEqual(initial_sstables, scrubbed_sstables)
+        assert initial_sstables == scrubbed_sstables
 
         users = self.query_users(session)
-        self.assertEqual(initial_users, users)
+        assert initial_users == users
 
         # Restart and check data again
         cluster.stop()
@@ -386,7 +391,7 @@ class TestScrub(TestHelper):
         session.execute('USE {}'.format(KEYSPACE))
 
         users = self.query_users(session)
-        self.assertEqual(initial_users, users)
+        assert initial_users == users
 
     def test_standalone_scrub(self):
         cluster = self.cluster
@@ -406,14 +411,14 @@ class TestScrub(TestHelper):
 
         scrubbed_sstables = self.standalonescrub('users')
         self.increase_sstable_generations(initial_sstables)
-        self.assertEqual(initial_sstables, scrubbed_sstables)
+        assert initial_sstables == scrubbed_sstables
 
         cluster.start()
         session = self.patient_cql_connection(node1)
         session.execute('USE {}'.format(KEYSPACE))
 
         users = self.query_users(session)
-        self.assertEqual(initial_users, users)
+        assert initial_users == users
 
     def test_standalone_scrub_essential_files_only(self):
         cluster = self.cluster
@@ -435,14 +440,14 @@ class TestScrub(TestHelper):
 
         scrubbed_sstables = self.standalonescrub('users')
         self.increase_sstable_generations(initial_sstables)
-        self.assertEqual(initial_sstables, scrubbed_sstables)
+        assert initial_sstables == scrubbed_sstables
 
         cluster.start()
         session = self.patient_cql_connection(node1)
         session.execute('USE {}'.format(KEYSPACE))
 
         users = self.query_users(session)
-        self.assertEqual(initial_users, users)
+        assert initial_users == users
 
     def test_scrub_with_UDT(self):
         """
@@ -460,4 +465,4 @@ class TestScrub(TestHelper):
         node1.nodetool("scrub")
         time.sleep(2)
         match = node1.grep_log("org.apache.cassandra.serializers.MarshalException: Not enough bytes to read a set")
-        self.assertEqual(len(match), 0)
+        assert len(match) == 0
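
The decode("utf-8") calls added in standalonescrub above are needed because
Popen pipes return bytes under python3. An alternative sketch that avoids the
manual decoding (universal_newlines is the pre-3.7 spelling of text=True; the
argument list here is illustrative, not the actual tool invocation):

    import subprocess

    p = subprocess.Popen(['sstablescrub', 'ks', 'cf'],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         universal_newlines=True)  # pipes yield str, not bytes
    out, err = p.communicate()
    assert isinstance(out, str)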

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/secondary_indexes_test.py
----------------------------------------------------------------------
diff --git a/secondary_indexes_test.py b/secondary_indexes_test.py
index 9cb6cc0..9b0f326 100644
--- a/secondary_indexes_test.py
+++ b/secondary_indexes_test.py
@@ -3,7 +3,10 @@ import random
 import re
 import time
 import uuid
-from unittest import skipIf
+import pytest
+import logging
+
+from flaky import flaky
 
 from cassandra import InvalidRequest
 from cassandra.concurrent import (execute_concurrent,
@@ -11,12 +14,13 @@ from cassandra.concurrent import (execute_concurrent,
 from cassandra.protocol import ConfigurationException
 from cassandra.query import BatchStatement, SimpleStatement
 
-from dtest import (DISABLE_VNODES, OFFHEAP_MEMTABLES, Tester, debug, CASSANDRA_VERSION_FROM_BUILD, create_ks, create_cf)
+from dtest import Tester, create_ks, create_cf
 from tools.assertions import assert_bootstrap_state, assert_invalid, assert_none, assert_one, assert_row_count
 from tools.data import block_until_index_is_built, rows_to_list
-from tools.decorators import since
 from tools.misc import new_node
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 class TestSecondaryIndexes(Tester):
 
@@ -30,7 +34,7 @@ class TestSecondaryIndexes(Tester):
             files.extend(os.listdir(index_sstables_dir))
         return set(files)
 
-    def data_created_before_index_not_returned_in_where_query_test(self):
+    def test_data_created_before_index_not_returned_in_where_query(self):
         """
         @jira_ticket CASSANDRA-3367
         """
@@ -79,9 +83,10 @@ class TestSecondaryIndexes(Tester):
         session.execute("CREATE INDEX b_index ON ks.cf (b);")
         num_rows = 100
         for i in range(num_rows):
-            indexed_value = i % (num_rows / 3)
+            indexed_value = i % (num_rows // 3)
             # use the same indexed value three times
-            session.execute("INSERT INTO ks.cf (a, b) VALUES ('%d', '%d');" % (i, indexed_value))
+            session.execute("INSERT INTO ks.cf (a, b) VALUES ('{a}', '{b}');"
+                            .format(a=i, b=indexed_value))
 
         cluster.flush()
 
@@ -99,31 +104,32 @@ class TestSecondaryIndexes(Tester):
                 if match:
                     concurrency = int(match.group(1))
                     expected_per_range = float(match.group(2))
-                    self.assertTrue(concurrency > 1, "Expected more than 1 concurrent range request, got %d" % concurrency)
-                    self.assertTrue(expected_per_range > 0)
+                    assert concurrency > 1, "Expected more than 1 concurrent range request, got %d" % concurrency
+                    assert expected_per_range > 0
                     break
             else:
                 self.fail("Didn't find matching trace event")
 
         query = SimpleStatement("SELECT * FROM ks.cf WHERE b='1';")
         result = session.execute(query, trace=True)
-        self.assertEqual(3, len(list(result)))
+        assert 3 == len(list(result))
         check_trace_events(result.get_query_trace())
 
         query = SimpleStatement("SELECT * FROM ks.cf WHERE b='1' LIMIT 100;")
         result = session.execute(query, trace=True)
-        self.assertEqual(3, len(list(result)))
+        assert 3 == len(list(result))
         check_trace_events(result.get_query_trace())
 
         query = SimpleStatement("SELECT * FROM ks.cf WHERE b='1' LIMIT 3;")
         result = session.execute(query, trace=True)
-        self.assertEqual(3, len(list(result)))
+        assert 3 == len(list(result))
         check_trace_events(result.get_query_trace())
 
         for limit in (1, 2):
             result = list(session.execute("SELECT * FROM ks.cf WHERE b='1' LIMIT %d;" % (limit,)))
-            self.assertEqual(limit, len(result))
+            assert limit == len(result)
 
+    @flaky(3)
     def test_6924_dropping_ks(self):
         """
         @jira_ticket CASSANDRA-6924
@@ -152,7 +158,7 @@ class TestSecondaryIndexes(Tester):
         # This only occurs when dropping and recreating with
         # the same name, so loop through this test a few times:
         for i in range(10):
-            debug("round %s" % i)
+            logger.debug("round %s" % i)
             try:
                 session.execute("DROP KEYSPACE ks")
             except ConfigurationException:
@@ -170,8 +176,9 @@ class TestSecondaryIndexes(Tester):
 
             rows = session.execute("select count(*) from ks.cf WHERE col1='asdf'")
             count = rows[0][0]
-            self.assertEqual(count, 10)
+            assert count == 10
 
+    @flaky
     def test_6924_dropping_cf(self):
         """
         @jira_ticket CASSANDRA-6924
@@ -190,7 +197,7 @@ class TestSecondaryIndexes(Tester):
         # This only occurs when dropping and recreating with
         # the same name, so loop through this test a few times:
         for i in range(10):
-            debug("round %s" % i)
+            logger.debug("round %s" % i)
             try:
                 session.execute("DROP COLUMNFAMILY ks.cf")
             except InvalidRequest:
@@ -207,7 +214,7 @@ class TestSecondaryIndexes(Tester):
 
             rows = session.execute("select count(*) from ks.cf WHERE col1='asdf'")
             count = rows[0][0]
-            self.assertEqual(count, 10)
+            assert count == 10
 
     def test_8280_validate_indexed_values(self):
         """
@@ -282,21 +289,8 @@ class TestSecondaryIndexes(Tester):
             pass
 
     def wait_for_schema_agreement(self, session):
-        rows = list(session.execute("SELECT schema_version FROM system.local"))
-        local_version = rows[0]
-
-        all_match = True
-        rows = list(session.execute("SELECT schema_version FROM system.peers"))
-        for peer_version in rows:
-            if peer_version != local_version:
-                all_match = False
-                break
-
-        if all_match:
-            return
-        else:
-            time.sleep(1)
-            self.wait_for_schema_agreement(session)
+        if not session.cluster.control_connection.wait_for_schema_agreement(wait_time=120):
+            raise AssertionError("Failed to reach schema agreement")
 
     @since('3.0')
     def test_manual_rebuild_index(self):
@@ -316,18 +310,18 @@ class TestSecondaryIndexes(Tester):
         block_until_index_is_built(node1, session, 'keyspace1', 'standard1', 'ix_c0')
 
         stmt = session.prepare('select * from standard1 where "C0" = ?')
-        self.assertEqual(1, len(list(session.execute(stmt, [lookup_value]))))
+        assert 1 == len(list(session.execute(stmt, [lookup_value])))
         before_files = self._index_sstables_files(node1, 'keyspace1', 'standard1', 'ix_c0')
 
         node1.nodetool("rebuild_index keyspace1 standard1 ix_c0")
         block_until_index_is_built(node1, session, 'keyspace1', 'standard1', 'ix_c0')
 
         after_files = self._index_sstables_files(node1, 'keyspace1', 'standard1', 'ix_c0')
-        self.assertNotEqual(before_files, after_files)
-        self.assertEqual(1, len(list(session.execute(stmt, [lookup_value]))))
+        assert before_files != after_files
+        assert 1 == len(list(session.execute(stmt, [lookup_value])))
 
         # verify that only the expected row is present in the build indexes table
-        self.assertEqual(1, len(list(session.execute("""SELECT * FROM system."IndexInfo";"""))))
+        assert 1 == len(list(session.execute("""SELECT * FROM system."IndexInfo";""")))
 
     @since('4.0')
     def test_failing_manual_rebuild_index(self):
@@ -355,12 +349,12 @@ class TestSecondaryIndexes(Tester):
         # Simulate a failing index rebuild
         before_files = self._index_sstables_files(node, 'k', 't', 'idx')
         node.byteman_submit(['./byteman/index_build_failure.btm'])
-        with self.assertRaises(Exception):
+        with pytest.raises(Exception):
             node.nodetool("rebuild_index k t idx")
         after_files = self._index_sstables_files(node, 'k', 't', 'idx')
 
         # Verify that the index is not rebuilt, not marked as built, and it still can answer queries
-        self.assertEqual(before_files, after_files)
+        assert before_files == after_files
         assert_none(session, """SELECT * FROM system."IndexInfo" WHERE table_name='k'""")
         assert_one(session, "SELECT * FROM k.t WHERE v = 1", [0, 1])
 
@@ -374,19 +368,19 @@ class TestSecondaryIndexes(Tester):
         after_files = self._index_sstables_files(node, 'k', 't', 'idx')
 
         # Verify that, the index is rebuilt, marked as built, and it can answer queries
-        self.assertNotEqual(before_files, after_files)
+        assert before_files != after_files
         assert_one(session, """SELECT table_name, index_name FROM system."IndexInfo" WHERE table_name='k'""", ['k', 'idx'])
         assert_one(session, "SELECT * FROM k.t WHERE v = 1", [0, 1])
 
         # Simulate another failing index rebuild
         before_files = self._index_sstables_files(node, 'k', 't', 'idx')
         node.byteman_submit(['./byteman/index_build_failure.btm'])
-        with self.assertRaises(Exception):
+        with pytest.raises(Exception):
             node.nodetool("rebuild_index k t idx")
         after_files = self._index_sstables_files(node, 'k', 't', 'idx')
 
         # Verify that the index is not rebuilt, not marked as built, and it still can answer queries
-        self.assertEqual(before_files, after_files)
+        assert before_files == after_files
         assert_none(session, """SELECT * FROM system."IndexInfo" WHERE table_name='k'""")
         assert_one(session, "SELECT * FROM k.t WHERE v = 1", [0, 1])
 
@@ -397,7 +391,7 @@ class TestSecondaryIndexes(Tester):
         after_files = self._index_sstables_files(node, 'k', 't', 'idx')
 
         # Verify that the index is rebuilt, marked as built, and it can answer queries
-        self.assertNotEqual(before_files, after_files)
+        assert before_files != after_files
         assert_one(session, """SELECT table_name, index_name FROM system."IndexInfo" WHERE table_name='k'""", ['k', 'idx'])
         assert_one(session, "SELECT * FROM k.t WHERE v = 1", [0, 1])
 
@@ -456,22 +450,22 @@ class TestSecondaryIndexes(Tester):
         session.execute("CREATE TABLE k.t (k int PRIMARY KEY, v int)")
         session.execute("INSERT INTO k.t(k, v) VALUES (0, 1)")
 
-        debug("Create the index")
+        logger.debug("Create the index")
         session.execute("CREATE INDEX idx ON k.t(v)")
         block_until_index_is_built(node, session, 'k', 't', 'idx')
         before_files = self._index_sstables_files(node, 'k', 't', 'idx')
 
-        debug("Verify the index is marked as built and it can be queried")
+        logger.debug("Verify the index is marked as built and it can be queried")
         assert_one(session, """SELECT table_name, index_name FROM system."IndexInfo" WHERE table_name='k'""", ['k', 'idx'])
         assert_one(session, "SELECT * FROM k.t WHERE v = 1", [0, 1])
 
-        debug("Restart the node and verify the index build is not submitted")
+        logger.debug("Restart the node and verify the index build is not submitted")
         node.stop()
         node.start(wait_for_binary_proto=True)
         after_files = self._index_sstables_files(node, 'k', 't', 'idx')
-        self.assertEqual(before_files, after_files)
+        assert before_files == after_files
 
-        debug("Verify the index is still marked as built and it can be queried")
+        logger.debug("Verify the index is still marked as built and it can be queried")
         session = self.patient_cql_connection(node)
         assert_one(session, """SELECT table_name, index_name FROM system."IndexInfo" WHERE table_name='k'""", ['k', 'idx'])
         assert_one(session, "SELECT * FROM k.t WHERE v = 1", [0, 1])
@@ -496,7 +490,7 @@ class TestSecondaryIndexes(Tester):
         session.execute("INSERT INTO tbl (id, c0, c1, c2) values (uuid(), 'a', 'e', 'f');")
 
         rows = list(session.execute("SELECT * FROM tbl WHERE c0 = 'a';"))
-        self.assertEqual(4, len(rows))
+        assert 4 == len(rows)
 
         stmt = "SELECT * FROM tbl WHERE c0 = 'a' AND c1 = 'b';"
         assert_invalid(session, stmt, "Cannot execute this query as it might involve data filtering and thus may have "
@@ -504,7 +498,7 @@ class TestSecondaryIndexes(Tester):
                                       "performance unpredictability, use ALLOW FILTERING")
 
         rows = list(session.execute("SELECT * FROM tbl WHERE c0 = 'a' AND c1 = 'b' ALLOW FILTERING;"))
-        self.assertEqual(2, len(rows))
+        assert 2 == len(rows)
 
     @since('3.0')
     def test_only_coordinator_chooses_index_for_query(self):
@@ -523,7 +517,7 @@ class TestSecondaryIndexes(Tester):
         session.execute("CREATE INDEX b_index ON ks.cf (b);")
         num_rows = 100
         for i in range(num_rows):
-            indexed_value = i % (num_rows / 3)
+            indexed_value = i % (num_rows // 3)
             # use the same indexed value three times
             session.execute("INSERT INTO ks.cf (a, b) VALUES ('{a}', '{b}');"
                             .format(a=i, b=indexed_value))
@@ -560,7 +554,7 @@ class TestSecondaryIndexes(Tester):
                               actual=match_counts[event_source], all=match_counts))
 
         def retry_on_failure(trace, regex, expected_matches, match_counts, event_source, min_expected, max_expected):
-            debug("Trace event inspection did not match expected, sleeping before re-fetching trace events. "
+            logger.debug("Trace event inspection did not match expected, sleeping before re-fetching trace events. "
                   "Expected: {expected} Actual: {actual}".format(expected=expected_matches, actual=match_counts))
             time.sleep(2)
             trace.populate(max_wait=2.0)
@@ -568,7 +562,7 @@ class TestSecondaryIndexes(Tester):
 
         query = SimpleStatement("SELECT * FROM ks.cf WHERE b='1';")
         result = session.execute(query, trace=True)
-        self.assertEqual(3, len(list(result)))
+        assert 3 == len(list(result))
 
         trace = result.get_query_trace()
 
@@ -586,7 +580,7 @@ class TestSecondaryIndexes(Tester):
                            [("127.0.0.1", 1, 200), ("127.0.0.2", 1, 200), ("127.0.0.3", 1, 200)],
                            retry_on_failure)
 
-    @skipIf(DISABLE_VNODES, "Test should only run with vnodes")
+    @pytest.mark.vnodes
     def test_query_indexes_with_vnodes(self):
         """
         Verifies correct query behaviour in the presence of vnodes
@@ -597,7 +591,7 @@ class TestSecondaryIndexes(Tester):
         node1, node2 = cluster.nodelist()
         session = self.patient_cql_connection(node1)
         session.execute("CREATE KEYSPACE ks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': '1'};")
-        session.execute("CREATE TABLE ks.compact_table (a int PRIMARY KEY, b int) WITH COMPACT STORAGE;")
+        session.execute("CREATE TABLE ks.compact_table (a int PRIMARY KEY, b int);")
         session.execute("CREATE INDEX keys_index ON ks.compact_table (b);")
         session.execute("CREATE TABLE ks.regular_table (a int PRIMARY KEY, b int)")
         session.execute("CREATE INDEX composites_index on ks.regular_table (b)")
@@ -605,7 +599,7 @@ class TestSecondaryIndexes(Tester):
         for node in cluster.nodelist():
             block_until_index_is_built(node, session, 'ks', 'regular_table', 'composites_index')
 
-        insert_args = [(i, i % 2) for i in xrange(100)]
+        insert_args = [(i, i % 2) for i in range(100)]
         execute_concurrent_with_args(session,
                                      session.prepare("INSERT INTO ks.compact_table (a, b) VALUES (?, ?)"),
                                      insert_args)
@@ -614,9 +608,9 @@ class TestSecondaryIndexes(Tester):
                                      insert_args)
 
         res = session.execute("SELECT * FROM ks.compact_table WHERE b = 0")
-        self.assertEqual(len(rows_to_list(res)), 50)
+        assert len(rows_to_list(res)) == 50
         res = session.execute("SELECT * FROM ks.regular_table WHERE b = 0")
-        self.assertEqual(len(rows_to_list(res)), 50)
+        assert len(rows_to_list(res)) == 50
 
 
 class TestSecondaryIndexesOnCollections(Tester):
@@ -649,7 +643,7 @@ class TestSecondaryIndexesOnCollections(Tester):
         results = execute_concurrent(session, cmds * 5, raise_on_first_error=True, concurrency=200)
 
         for (success, result) in results:
-            self.assertTrue(success, "didn't get success on insert: {0}".format(result))
+            assert success, "didn't get success on insert: {0}".format(result)
 
         session.execute("CREATE INDEX idx_single_tuple ON simple_with_tuple(single_tuple);")
         session.execute("CREATE INDEX idx_double_tuple ON simple_with_tuple(double_tuple);")
@@ -659,25 +653,25 @@ class TestSecondaryIndexesOnCollections(Tester):
 
         # check if indexes work on existing data
         for n in range(50):
-            self.assertEqual(5, len(list(session.execute("select * from simple_with_tuple where single_tuple = ({0});".format(n)))))
-            self.assertEqual(0, len(list(session.execute("select * from simple_with_tuple where single_tuple = (-1);".format(n)))))
-            self.assertEqual(5, len(list(session.execute("select * from simple_with_tuple where double_tuple = ({0},{0});".format(n)))))
-            self.assertEqual(0, len(list(session.execute("select * from simple_with_tuple where double_tuple = ({0},-1);".format(n)))))
-            self.assertEqual(5, len(list(session.execute("select * from simple_with_tuple where triple_tuple = ({0},{0},{0});".format(n)))))
-            self.assertEqual(0, len(list(session.execute("select * from simple_with_tuple where triple_tuple = ({0},{0},-1);".format(n)))))
-            self.assertEqual(5, len(list(session.execute("select * from simple_with_tuple where nested_one = ({0},({0},{0}));".format(n)))))
-            self.assertEqual(0, len(list(session.execute("select * from simple_with_tuple where nested_one = ({0},({0},-1));".format(n)))))
+            assert 5 == len(list(session.execute("select * from simple_with_tuple where single_tuple = ({0});".format(n))))
+            assert 0 == len(list(session.execute("select * from simple_with_tuple where single_tuple = (-1);".format(n))))
+            assert 5 == len(list(session.execute("select * from simple_with_tuple where double_tuple = ({0},{0});".format(n))))
+            assert 0 == len(list(session.execute("select * from simple_with_tuple where double_tuple = ({0},-1);".format(n))))
+            assert 5 == len(list(session.execute("select * from simple_with_tuple where triple_tuple = ({0},{0},{0});".format(n))))
+            assert 0 == len(list(session.execute("select * from simple_with_tuple where triple_tuple = ({0},{0},-1);".format(n))))
+            assert 5 == len(list(session.execute("select * from simple_with_tuple where nested_one = ({0},({0},{0}));".format(n))))
+            assert 0 == len(list(session.execute("select * from simple_with_tuple where nested_one = ({0},({0},-1));".format(n))))
 
         # check if indexes work on new data inserted after index creation
         results = execute_concurrent(session, cmds * 3, raise_on_first_error=True, concurrency=200)
         for (success, result) in results:
-            self.assertTrue(success, "didn't get success on insert: {0}".format(result))
+            assert success, "didn't get success on insert: {0}".format(result)
         time.sleep(5)
         for n in range(50):
-            self.assertEqual(8, len(list(session.execute("select * from simple_with_tuple where single_tuple = ({0});".format(n)))))
-            self.assertEqual(8, len(list(session.execute("select * from simple_with_tuple where double_tuple = ({0},{0});".format(n)))))
-            self.assertEqual(8, len(list(session.execute("select * from simple_with_tuple where triple_tuple = ({0},{0},{0});".format(n)))))
-            self.assertEqual(8, len(list(session.execute("select * from simple_with_tuple where nested_one = ({0},({0},{0}));".format(n)))))
+            assert 8 == len(list(session.execute("select * from simple_with_tuple where single_tuple = ({0});".format(n))))
+            assert 8 == len(list(session.execute("select * from simple_with_tuple where double_tuple = ({0},{0});".format(n))))
+            assert 8 == len(list(session.execute("select * from simple_with_tuple where triple_tuple = ({0},{0},{0});".format(n))))
+            assert 8 == len(list(session.execute("select * from simple_with_tuple where nested_one = ({0},({0},{0}));".format(n))))
 
         # check if indexes work on mutated data
         for n in range(5):
@@ -698,15 +692,15 @@ class TestSecondaryIndexesOnCollections(Tester):
                 session.execute("update simple_with_tuple set nested_one = (-999,(-999,-999)) where id = {0}".format(row.id))
 
         for n in range(5):
-            self.assertEqual(0, len(list(session.execute("select * from simple_with_tuple where single_tuple = ({0});".format(n)))))
-            self.assertEqual(0, len(list(session.execute("select * from simple_with_tuple where double_tuple = ({0},{0});".format(n)))))
-            self.assertEqual(0, len(list(session.execute("select * from simple_with_tuple where triple_tuple = ({0},{0},{0});".format(n)))))
-            self.assertEqual(0, len(list(session.execute("select * from simple_with_tuple where nested_one = ({0},({0},{0}));".format(n)))))
+            assert 0 == len(list(session.execute("select * from simple_with_tuple where single_tuple = ({0});".format(n))))
+            assert 0 == len(list(session.execute("select * from simple_with_tuple where double_tuple = ({0},{0});".format(n))))
+            assert 0 == len(list(session.execute("select * from simple_with_tuple where triple_tuple = ({0},{0},{0});".format(n))))
+            assert 0 == len(list(session.execute("select * from simple_with_tuple where nested_one = ({0},({0},{0}));".format(n))))
 
-        self.assertEqual(40, len(list(session.execute("select * from simple_with_tuple where single_tuple = (-999);"))))
-        self.assertEqual(40, len(list(session.execute("select * from simple_with_tuple where double_tuple = (-999,-999);"))))
-        self.assertEqual(40, len(list(session.execute("select * from simple_with_tuple where triple_tuple = (-999,-999,-999);"))))
-        self.assertEqual(40, len(list(session.execute("select * from simple_with_tuple where nested_one = (-999,(-999,-999));"))))
+        assert 40 == len(list(session.execute("select * from simple_with_tuple where single_tuple = (-999);")))
+        assert 40 == len(list(session.execute("select * from simple_with_tuple where double_tuple = (-999,-999);")))
+        assert 40 == len(list(session.execute("select * from simple_with_tuple where triple_tuple = (-999,-999,-999);")))
+        assert 40 == len(list(session.execute("select * from simple_with_tuple where nested_one = (-999,(-999,-999));")))
 
     def test_list_indexes(self):
         """
@@ -731,7 +725,7 @@ class TestSecondaryIndexesOnCollections(Tester):
 
         stmt = ("SELECT * from list_index_search.users where uuids contains {some_uuid}").format(some_uuid=uuid.uuid4())
         row = list(session.execute(stmt))
-        self.assertEqual(0, len(row))
+        assert 0 == len(row)
 
         # add a row which doesn't specify data for the indexed column, and query again
         user1_uuid = uuid.uuid4()
@@ -742,7 +736,7 @@ class TestSecondaryIndexesOnCollections(Tester):
 
         stmt = ("SELECT * from list_index_search.users where uuids contains {some_uuid}").format(some_uuid=uuid.uuid4())
         row = list(session.execute(stmt))
-        self.assertEqual(0, len(row))
+        assert 0 == len(row)
 
         _id = uuid.uuid4()
         # alter the row to add a single item to the indexed list
@@ -752,7 +746,7 @@ class TestSecondaryIndexesOnCollections(Tester):
 
         stmt = ("SELECT * from list_index_search.users where uuids contains {some_uuid}").format(some_uuid=_id)
         row = list(session.execute(stmt))
-        self.assertEqual(1, len(row))
+        assert 1 == len(row)
 
         # add a bunch of user records and query them back
         shared_uuid = uuid.uuid4()  # this uuid will be on all records
@@ -779,7 +773,7 @@ class TestSecondaryIndexesOnCollections(Tester):
         stmt = ("SELECT * from list_index_search.users where uuids contains {shared_uuid}").format(shared_uuid=shared_uuid)
         rows = list(session.execute(stmt))
         result = [row for row in rows]
-        self.assertEqual(50000, len(result))
+        assert 50000 == len(result)
 
         # shuffle the log in-place, and double-check a slice of records by querying the secondary index
         random.shuffle(log)
@@ -789,14 +783,14 @@ class TestSecondaryIndexesOnCollections(Tester):
                     ).format(unshared_uuid=log_entry['unshared_uuid'])
             rows = list(session.execute(stmt))
 
-            self.assertEqual(1, len(rows))
+            assert 1 == len(rows)
 
             db_user_id, db_email, db_uuids = rows[0]
 
-            self.assertEqual(db_user_id, log_entry['user_id'])
-            self.assertEqual(db_email, log_entry['email'])
-            self.assertEqual(str(db_uuids[0]), str(shared_uuid))
-            self.assertEqual(str(db_uuids[1]), str(log_entry['unshared_uuid']))
+            assert db_user_id == log_entry['user_id']
+            assert db_email == log_entry['email']
+            assert str(db_uuids[0]) == str(shared_uuid)
+            assert str(db_uuids[1]) == str(log_entry['unshared_uuid'])
 
     def test_set_indexes(self):
         """
@@ -820,7 +814,7 @@ class TestSecondaryIndexesOnCollections(Tester):
 
         stmt = ("SELECT * from set_index_search.users where uuids contains {some_uuid}").format(some_uuid=uuid.uuid4())
         row = list(session.execute(stmt))
-        self.assertEqual(0, len(row))
+        assert 0 == len(row)
 
         # add a row which doesn't specify data for the indexed column, and query again
         user1_uuid = uuid.uuid4()
@@ -830,7 +824,7 @@ class TestSecondaryIndexesOnCollections(Tester):
 
         stmt = ("SELECT * from set_index_search.users where uuids contains {some_uuid}").format(some_uuid=uuid.uuid4())
         row = list(session.execute(stmt))
-        self.assertEqual(0, len(row))
+        assert 0 == len(row)
 
         _id = uuid.uuid4()
         # alter the row to add a single item to the indexed set
@@ -839,7 +833,7 @@ class TestSecondaryIndexesOnCollections(Tester):
 
         stmt = ("SELECT * from set_index_search.users where uuids contains {some_uuid}").format(some_uuid=_id)
         row = list(session.execute(stmt))
-        self.assertEqual(1, len(row))
+        assert 1 == len(row)
 
         # add a bunch of user records and query them back
         shared_uuid = uuid.uuid4()  # this uuid will be on all records
@@ -866,7 +860,7 @@ class TestSecondaryIndexesOnCollections(Tester):
         stmt = ("SELECT * from set_index_search.users where uuids contains {shared_uuid}").format(shared_uuid=shared_uuid)
         rows = session.execute(stmt)
         result = [row for row in rows]
-        self.assertEqual(50000, len(result))
+        assert 50000 == len(result)
 
         # shuffle the log in-place, and double-check a slice of records by querying the secondary index
         random.shuffle(log)
@@ -876,14 +870,14 @@ class TestSecondaryIndexesOnCollections(Tester):
                     ).format(unshared_uuid=log_entry['unshared_uuid'])
             rows = list(session.execute(stmt))
 
-            self.assertEqual(1, len(rows))
+            assert 1 == len(rows)
 
             db_user_id, db_email, db_uuids = rows[0]
 
-            self.assertEqual(db_user_id, log_entry['user_id'])
-            self.assertEqual(db_email, log_entry['email'])
-            self.assertTrue(shared_uuid in db_uuids)
-            self.assertTrue(log_entry['unshared_uuid'] in db_uuids)
+            assert db_user_id == log_entry['user_id']
+            assert db_email == log_entry['email']
+            assert shared_uuid in db_uuids
+            assert log_entry['unshared_uuid'] in db_uuids
 
     @since('3.0')
     def test_multiple_indexes_on_single_map_column(self):
@@ -916,29 +910,29 @@ class TestSecondaryIndexesOnCollections(Tester):
         session.execute("INSERT INTO map_tbl (id, amap) values (uuid(), {'faz': 1, 'baz': 2});")
 
         value_search = list(session.execute("SELECT * FROM map_tbl WHERE amap CONTAINS 1"))
-        self.assertEqual(2, len(value_search), "incorrect number of rows when querying on map values")
+        assert 2 == len(value_search), "incorrect number of rows when querying on map values"
 
         key_search = list(session.execute("SELECT * FROM map_tbl WHERE amap CONTAINS KEY 'foo'"))
-        self.assertEqual(1, len(key_search), "incorrect number of rows when querying on map keys")
+        assert 1 == len(key_search), "incorrect number of rows when querying on map keys"
 
         entries_search = list(session.execute("SELECT * FROM map_tbl WHERE amap['foo'] = 1"))
-        self.assertEqual(1, len(entries_search), "incorrect number of rows when querying on map entries")
+        assert 1 == len(entries_search), "incorrect number of rows when querying on map entries"
 
         session.cluster.refresh_schema_metadata()
         table_meta = session.cluster.metadata.keyspaces["map_double_index"].tables["map_tbl"]
-        self.assertEqual(3, len(table_meta.indexes))
-        self.assertItemsEqual(['map_keys', 'map_values', 'map_entries'], table_meta.indexes)
-        self.assertEqual(3, len(session.cluster.metadata.keyspaces["map_double_index"].indexes))
+        assert 3 == len(table_meta.indexes)
+        assert {'map_keys', 'map_values', 'map_entries'} == set(table_meta.indexes.keys())
+        assert 3 == len(session.cluster.metadata.keyspaces["map_double_index"].indexes)
 
-        self.assertTrue('map_keys' in table_meta.export_as_string())
-        self.assertTrue('map_values' in table_meta.export_as_string())
-        self.assertTrue('map_entries' in table_meta.export_as_string())
+        assert 'map_keys' in table_meta.export_as_string()
+        assert 'map_values' in table_meta.export_as_string()
+        assert 'map_entries' in table_meta.export_as_string()
 
         session.execute("DROP TABLE map_tbl")
         session.cluster.refresh_schema_metadata()
-        self.assertEqual(0, len(session.cluster.metadata.keyspaces["map_double_index"].indexes))
+        assert 0 == len(session.cluster.metadata.keyspaces["map_double_index"].indexes)
 
-    @skipIf(OFFHEAP_MEMTABLES, 'Hangs with offheap memtables')
+    @pytest.mark.no_offheap_memtables
     def test_map_indexes(self):
         """
         Checks that secondary indexes on maps work for querying on both keys and values
@@ -961,7 +955,7 @@ class TestSecondaryIndexesOnCollections(Tester):
 
         stmt = "SELECT * from map_index_search.users where uuids contains key {some_uuid}".format(some_uuid=uuid.uuid4())
         rows = list(session.execute(stmt))
-        self.assertEqual(0, len(rows))
+        assert 0 == len(rows)
 
         # add a row which doesn't specify data for the indexed column, and query again
         user1_uuid = uuid.uuid4()
@@ -972,7 +966,7 @@ class TestSecondaryIndexesOnCollections(Tester):
 
         stmt = ("SELECT * from map_index_search.users where uuids contains key {some_uuid}").format(some_uuid=uuid.uuid4())
         rows = list(session.execute(stmt))
-        self.assertEqual(0, len(rows))
+        assert 0 == len(rows)
 
         _id = uuid.uuid4()
 
@@ -983,7 +977,7 @@ class TestSecondaryIndexesOnCollections(Tester):
 
         stmt = ("SELECT * from map_index_search.users where uuids contains key {some_uuid}").format(some_uuid=_id)
         rows = list(session.execute(stmt))
-        self.assertEqual(1, len(rows))
+        assert 1 == len(rows)
 
         # add a bunch of user records and query them back
         shared_uuid = uuid.uuid4()  # this uuid will be on all records
@@ -1012,7 +1006,7 @@ class TestSecondaryIndexesOnCollections(Tester):
                 ).format(shared_uuid=shared_uuid)
         rows = session.execute(stmt)
         result = [row for row in rows]
-        self.assertEqual(50000, len(result))
+        assert 50000 == len(result)
 
         # shuffle the log in-place, and double-check a slice of records by querying the secondary index on keys
         random.shuffle(log)
@@ -1023,15 +1017,15 @@ class TestSecondaryIndexesOnCollections(Tester):
             row = session.execute(stmt)
 
             result = list(row)
-            rows = self.assertEqual(1, len(result))
+            assert 1 == len(result)
 
             db_user_id, db_email, db_uuids = result[0]
 
-            self.assertEqual(db_user_id, log_entry['user_id'])
-            self.assertEqual(db_email, log_entry['email'])
+            assert db_user_id == log_entry['user_id']
+            assert db_email == log_entry['email']
 
-            self.assertTrue(shared_uuid in db_uuids)
-            self.assertTrue(log_entry['unshared_uuid1'] in db_uuids)
+            assert shared_uuid in db_uuids
+            assert log_entry['unshared_uuid1'] in db_uuids
 
         # attempt to add an index on map values as well (should fail pre 3.0)
         stmt = "CREATE INDEX user_uuids_values on map_index_search.users (uuids);"
@@ -1066,14 +1060,14 @@ class TestSecondaryIndexesOnCollections(Tester):
                     ).format(unshared_uuid2=log_entry['unshared_uuid2'])
 
             rows = list(session.execute(stmt))
-            self.assertEqual(1, len(rows), rows)
+            assert 1 == len(rows), rows
 
             db_user_id, db_email, db_uuids = rows[0]
-            self.assertEqual(db_user_id, log_entry['user_id'])
-            self.assertEqual(db_email, log_entry['email'])
+            assert db_user_id == log_entry['user_id']
+            assert db_email == log_entry['email']
 
-            self.assertTrue(shared_uuid in db_uuids)
-            self.assertTrue(log_entry['unshared_uuid2'] in db_uuids.values())
+            assert shared_uuid in db_uuids
+            assert log_entry['unshared_uuid2'] in list(db_uuids.values())
 
 
 class TestUpgradeSecondaryIndexes(Tester):
@@ -1106,22 +1100,22 @@ class TestUpgradeSecondaryIndexes(Tester):
         node1.drain()
         node1.watch_log_for("DRAINED")
         node1.stop(wait_other_notice=False)
-        debug("Upgrading to current version")
+        logger.debug("Upgrading to current version")
         self.set_node_to_current_version(node1)
         node1.start(wait_other_notice=True)
 
         [node1] = cluster.nodelist()
         session = self.patient_cql_connection(node1)
-        debug(cluster.cassandra_version())
+        logger.debug(cluster.cassandra_version())
         assert_one(session, query, [0, 0])
 
     def upgrade_to_version(self, tag, nodes=None):
-        debug('Upgrading to ' + tag)
+        logger.debug('Upgrading to ' + tag)
         if nodes is None:
             nodes = self.cluster.nodelist()
 
         for node in nodes:
-            debug('Shutting down node: ' + node.name)
+            logger.debug('Shutting down node: ' + node.name)
             node.drain()
             node.watch_log_for("DRAINED")
             node.stop(wait_other_notice=False)
@@ -1129,25 +1123,24 @@ class TestUpgradeSecondaryIndexes(Tester):
         # Update Cassandra Directory
         for node in nodes:
             node.set_install_dir(version=tag)
-            debug("Set new cassandra dir for %s: %s" % (node.name, node.get_install_dir()))
+            logger.debug("Set new cassandra dir for %s: %s" % (node.name, node.get_install_dir()))
         self.cluster.set_install_dir(version=tag)
 
         # Restart nodes on new version
         for node in nodes:
-            debug('Starting %s on new version (%s)' % (node.name, tag))
+            logger.debug('Starting %s on new version (%s)' % (node.name, tag))
             # Setup log4j / logback again (necessary moving from 2.0 -> 2.1):
             node.set_log_level("INFO")
             node.start(wait_other_notice=True)
             # node.nodetool('upgradesstables -a')
 
 
-@skipIf(CASSANDRA_VERSION_FROM_BUILD == '3.9', "Test doesn't run on 3.9")
 @since('3.10')
 class TestPreJoinCallback(Tester):
 
-    def __init__(self, *args, **kwargs):
-        # Ignore these log patterns:
-        self.ignore_log_patterns = [
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        fixture_dtest_setup.ignore_log_patterns = [
             # ignore all streaming errors during bootstrap
             r'Exception encountered during startup',
             r'Streaming error occurred',
@@ -1156,7 +1149,6 @@ class TestPreJoinCallback(Tester):
             r'\[Stream.*\] Remote peer 127.0.0.\d:7000 failed stream session',
             r'Error while waiting on bootstrap to complete. Bootstrap will have to be restarted.'
         ]
-        Tester.__init__(self, *args, **kwargs)
 
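pytest refuses to collect test classes that define __init__, so per-class setup like the log-pattern list above moves into an autouse fixture that runs before every test in the class. A minimal sketch of the idiom, assuming fixture_dtest_setup is supplied by the dtest pytest plugin:

    import pytest

    class TestExample(Tester):

        @pytest.fixture(autouse=True)
        def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
            # replaces assigning self.ignore_log_patterns in __init__
            # and chaining to Tester.__init__
            fixture_dtest_setup.ignore_log_patterns = [
                r'Streaming error occurred',
            ]
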
     def _base_test(self, joinFn):
         cluster = self.cluster
@@ -1182,16 +1174,16 @@ class TestPreJoinCallback(Tester):
         # Run the join function to test
         joinFn(cluster, tokens[1])
 
-    def bootstrap_test(self):
+    def test_bootstrap(self):
         def bootstrap(cluster, token):
             node2 = new_node(cluster)
             node2.set_configuration_options(values={'initial_token': token})
             node2.start(wait_for_binary_proto=True)
-            self.assertTrue(node2.grep_log('Executing pre-join post-bootstrap tasks'))
+            assert node2.grep_log('Executing pre-join post-bootstrap tasks')
 
         self._base_test(bootstrap)
 
-    def resume_test(self):
+    def test_resume(self):
         def resume(cluster, token):
             node1 = cluster.nodes['node1']
             # set up byteman on node1 to inject a failure when streaming to node2
@@ -1217,33 +1209,33 @@ class TestPreJoinCallback(Tester):
 
             node2.nodetool("bootstrap resume")
             assert_bootstrap_state(self, node2, 'COMPLETED')
-            self.assertTrue(node2.grep_log('Executing pre-join post-bootstrap tasks'))
+            assert node2.grep_log('Executing pre-join post-bootstrap tasks')
 
         self._base_test(resume)
 
-    def manual_join_test(self):
+    def test_manual_join(self):
         def manual_join(cluster, token):
             node2 = new_node(cluster)
             node2.set_configuration_options(values={'initial_token': token})
             node2.start(join_ring=False, wait_for_binary_proto=True, wait_other_notice=240)
-            self.assertTrue(node2.grep_log('Not joining ring as requested'))
-            self.assertFalse(node2.grep_log('Executing pre-join'))
+            assert node2.grep_log('Not joining ring as requested')
+            assert not node2.grep_log('Executing pre-join')
 
             node2.nodetool("join")
-            self.assertTrue(node2.grep_log('Executing pre-join post-bootstrap tasks'))
+            assert node2.grep_log('Executing pre-join post-bootstrap tasks')
 
         self._base_test(manual_join)
 
-    def write_survey_test(self):
+    def test_write_survey(self):
         def write_survey_and_join(cluster, token):
             node2 = new_node(cluster)
             node2.set_configuration_options(values={'initial_token': token})
             node2.start(jvm_args=["-Dcassandra.write_survey=true"], wait_for_binary_proto=True)
-            self.assertTrue(node2.grep_log('Startup complete, but write survey mode is active, not becoming an active ring member.'))
-            self.assertFalse(node2.grep_log('Executing pre-join'))
+            assert node2.grep_log('Startup complete, but write survey mode is active, not becoming an active ring member.')
+            assert not node2.grep_log('Executing pre-join')
 
             node2.nodetool("join")
-            self.assertTrue(node2.grep_log('Leaving write survey mode and joining ring at operator request'))
-            self.assertTrue(node2.grep_log('Executing pre-join post-bootstrap tasks'))
+            assert node2.grep_log('Leaving write survey mode and joining ring at operator request')
+            assert node2.grep_log('Executing pre-join post-bootstrap tasks')
 
         self._base_test(write_survey_and_join)

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/snapshot_test.py
----------------------------------------------------------------------
diff --git a/snapshot_test.py b/snapshot_test.py
index 1aa5a70..9b561ce 100644
--- a/snapshot_test.py
+++ b/snapshot_test.py
@@ -4,16 +4,20 @@ import os
 import shutil
 import subprocess
 import time
+import pytest
+import logging
 
 from cassandra.concurrent import execute_concurrent_with_args
 
-from dtest import (Tester, cleanup_cluster, create_ccm_cluster, create_ks,
-                   debug, get_test_path)
+from dtest_setup_overrides import DTestSetupOverrides
+from dtest import Tester, create_ks
 from tools.assertions import assert_one
 from tools.files import replace_in_file, safe_mkdtemp
 from tools.hacks import advance_to_next_cl_segment
-from tools.misc import ImmutableMapping
-from tools.decorators import since
+from tools.misc import ImmutableMapping, get_current_test_name
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
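Two module-level conventions recur in every migrated file: the old @since decorator is re-bound as a pytest marker (assumed here to be interpreted by a conftest hook against the running cluster version), and the global debug() helper gives way to a per-module logger. A minimal sketch:

    import logging
    import pytest

    since = pytest.mark.since              # marker checked by the dtest conftest
    logger = logging.getLogger(__name__)   # replaces the module-level debug()

    @since('3.0')
    def test_example():
        logger.debug("runs only against clusters at version 3.0 or newer")
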
 class SnapshotTester(Tester):
@@ -28,10 +32,10 @@ class SnapshotTester(Tester):
         execute_concurrent_with_args(session, insert_statement, args, concurrency=20)
 
     def make_snapshot(self, node, ks, cf, name):
-        debug("Making snapshot....")
+        logger.debug("Making snapshot....")
         node.flush()
         snapshot_cmd = 'snapshot {ks} -cf {cf} -t {name}'.format(ks=ks, cf=cf, name=name)
-        debug("Running snapshot cmd: {snapshot_cmd}".format(snapshot_cmd=snapshot_cmd))
+        logger.debug("Running snapshot cmd: {snapshot_cmd}".format(snapshot_cmd=snapshot_cmd))
         node.nodetool(snapshot_cmd)
         tmpdir = safe_mkdtemp()
         os.mkdir(os.path.join(tmpdir, ks))
@@ -47,8 +51,8 @@ class SnapshotTester(Tester):
                     snapshot_dir = snapshot_dirs[0]
                 else:
                     continue
-            debug("snapshot_dir is : " + snapshot_dir)
-            debug("snapshot copy is : " + tmpdir)
+            logger.debug("snapshot_dir is : " + snapshot_dir)
+            logger.debug("snapshot copy is : " + tmpdir)
 
             # Copy files from the snapshot dir to existing temp dir
             distutils.dir_util.copy_tree(str(snapshot_dir), os.path.join(tmpdir, str(x), ks, cf))
@@ -57,8 +61,8 @@ class SnapshotTester(Tester):
         return tmpdir
 
     def restore_snapshot(self, snapshot_dir, node, ks, cf):
-        debug("Restoring snapshot....")
-        for x in xrange(0, self.cluster.data_dir_count):
+        logger.debug("Restoring snapshot....")
+        for x in range(0, self.cluster.data_dir_count):
             snap_dir = os.path.join(snapshot_dir, str(x), ks, cf)
             if os.path.exists(snap_dir):
                 ip = node.address()
@@ -70,11 +74,11 @@ class SnapshotTester(Tester):
 
                 if exit_status != 0:
                     raise Exception("sstableloader command '%s' failed; exit status: %d'; stdout: %s; stderr: %s" %
-                                    (" ".join(args), exit_status, stdout, stderr))
+                                    (" ".join(args), exit_status, stdout.decode("utf-8"), stderr.decode("utf-8")))
 
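The .decode("utf-8") calls above address a Python 3 behavior change: Popen.communicate() returns bytes, which no longer interpolate cleanly into str error messages. A small standalone sketch of the pitfall:

    import subprocess

    p = subprocess.Popen(['ls', '/nonexistent'],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    # under python3 stdout/stderr are bytes; decode before formatting
    message = "stdout: %s; stderr: %s" % (stdout.decode("utf-8"), stderr.decode("utf-8"))
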
     def restore_snapshot_schema(self, snapshot_dir, node, ks, cf):
-        debug("Restoring snapshot schema....")
-        for x in xrange(0, self.cluster.data_dir_count):
+        logger.debug("Restoring snapshot schema....")
+        for x in range(0, self.cluster.data_dir_count):
             schema_path = os.path.join(snapshot_dir, str(x), ks, cf, 'schema.cql')
             if os.path.exists(schema_path):
                 node.run_cqlsh(cmds="SOURCE '%s'" % schema_path)
@@ -96,13 +100,13 @@ class TestSnapshot(SnapshotTester):
         # away when we restore:
         self.insert_rows(session, 100, 200)
         rows = session.execute('SELECT count(*) from ks.cf')
-        self.assertEqual(rows[0][0], 200)
+        assert rows[0][0] == 200
 
         # Drop the keyspace, make sure we have no data:
         session.execute('DROP KEYSPACE ks')
         self.create_schema(session)
         rows = session.execute('SELECT count(*) from ks.cf')
-        self.assertEqual(rows[0][0], 0)
+        assert rows[0][0] == 0
 
         # Restore data from snapshot:
         self.restore_snapshot(snapshot_dir, node1, 'ks', 'cf')
@@ -110,10 +114,10 @@ class TestSnapshot(SnapshotTester):
         rows = session.execute('SELECT count(*) from ks.cf')
 
         # clean up
-        debug("removing snapshot_dir: " + snapshot_dir)
+        logger.debug("removing snapshot_dir: " + snapshot_dir)
         shutil.rmtree(snapshot_dir)
 
-        self.assertEqual(rows[0][0], 100)
+        assert rows[0][0] == 100
 
     @since('3.0')
     def test_snapshot_and_restore_drop_table_remove_dropped_column(self):
@@ -146,7 +150,7 @@ class TestSnapshot(SnapshotTester):
         assert_one(session, "SELECT * FROM ks.cf", [1, "a", "b"])
 
         # Clean up
-        debug("removing snapshot_dir: " + snapshot_dir)
+        logger.debug("removing snapshot_dir: " + snapshot_dir)
         shutil.rmtree(snapshot_dir)
 
     @since('3.11')
@@ -182,22 +186,27 @@ class TestSnapshot(SnapshotTester):
         assert_one(session, "SELECT * FROM ks.cf", [1, "a"])
 
         # Clean up
-        debug("removing snapshot_dir: " + snapshot_dir)
+        logger.debug("removing snapshot_dir: " + snapshot_dir)
         shutil.rmtree(snapshot_dir)
 
 
 class TestArchiveCommitlog(SnapshotTester):
-    cluster_options = ImmutableMapping({'commitlog_segment_size_in_mb': 1})
+
+    @pytest.fixture(scope='function', autouse=True)
+    def fixture_dtest_setup_overrides(self):
+        dtest_setup_overrides = DTestSetupOverrides()
+        dtest_setup_overrides.cluster_options = ImmutableMapping({'commitlog_segment_size_in_mb': 1})
+        return dtest_setup_overrides
 
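Class-level cluster_options attributes are replaced by a fixture_dtest_setup_overrides fixture, which the dtest plugin is assumed to consume when it builds the ccm cluster for each test. A minimal sketch of the idiom:

    import pytest
    from dtest_setup_overrides import DTestSetupOverrides
    from tools.misc import ImmutableMapping

    class TestWithOverrides(Tester):

        @pytest.fixture(scope='function', autouse=True)
        def fixture_dtest_setup_overrides(self):
            # the returned overrides are applied at cluster creation time
            overrides = DTestSetupOverrides()
            overrides.cluster_options = ImmutableMapping({'commitlog_segment_size_in_mb': 1})
            return overrides
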
     def make_snapshot(self, node, ks, cf, name):
-        debug("Making snapshot....")
+        logger.debug("Making snapshot....")
         node.flush()
         snapshot_cmd = 'snapshot {ks} -cf {cf} -t {name}'.format(ks=ks, cf=cf, name=name)
-        debug("Running snapshot cmd: {snapshot_cmd}".format(snapshot_cmd=snapshot_cmd))
+        logger.debug("Running snapshot cmd: {snapshot_cmd}".format(snapshot_cmd=snapshot_cmd))
         node.nodetool(snapshot_cmd)
         tmpdirs = []
         base_tmpdir = safe_mkdtemp()
-        for x in xrange(0, self.cluster.data_dir_count):
+        for x in range(0, self.cluster.data_dir_count):
             tmpdir = os.path.join(base_tmpdir, str(x))
             os.mkdir(tmpdir)
             # Copy files from the snapshot dir to existing temp dir
@@ -207,7 +216,7 @@ class TestArchiveCommitlog(SnapshotTester):
         return tmpdirs
 
     def restore_snapshot(self, snapshot_dir, node, ks, cf, name):
-        debug("Restoring snapshot for cf ....")
+        logger.debug("Restoring snapshot for cf ....")
         data_dir = os.path.join(node.get_path(), 'data{0}'.format(os.path.basename(snapshot_dir)))
         cfs = [s for s in os.listdir(snapshot_dir) if s.startswith(cf + "-")]
         if len(cfs) > 0:
@@ -220,7 +229,7 @@ class TestArchiveCommitlog(SnapshotTester):
                     os.mkdir(os.path.join(data_dir, ks))
                 os.mkdir(os.path.join(data_dir, ks, cf_id))
 
-                debug("snapshot_dir is : " + snapshot_dir)
+                logger.debug("snapshot_dir is : " + snapshot_dir)
                 distutils.dir_util.copy_tree(snapshot_dir, os.path.join(data_dir, ks, cf_id))
 
     def test_archive_commitlog(self):
@@ -232,7 +241,7 @@ class TestArchiveCommitlog(SnapshotTester):
         """
         self.run_archive_commitlog(restore_point_in_time=False, archive_active_commitlogs=True)
 
-    def dont_test_archive_commitlog(self):
+    def test_dont_archive_commitlog(self):
         """
         Run the archive commitlog test, but forget to add the restore commands
         """
@@ -267,7 +276,7 @@ class TestArchiveCommitlog(SnapshotTester):
 
         # Create a temp directory for storing commitlog archives:
         tmp_commitlog = safe_mkdtemp()
-        debug("tmp_commitlog: " + tmp_commitlog)
+        logger.debug("tmp_commitlog: " + tmp_commitlog)
 
         # Edit commitlog_archiving.properties and set an archive
         # command:
@@ -289,14 +298,14 @@ class TestArchiveCommitlog(SnapshotTester):
         )
 
         session.execute('CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')
-        debug("Writing first 30,000 rows...")
+        logger.debug("Writing first 30,000 rows...")
         self.insert_rows(session, 0, 30000)
         # Record when this first set of inserts finished:
         insert_cutoff_times = [time.gmtime()]
 
         # Delete all commitlog backups so far:
         for f in glob.glob(tmp_commitlog + "/*"):
-            debug('Removing {}'.format(f))
+            logger.debug('Removing {}'.format(f))
             os.remove(f)
 
         snapshot_dirs = self.make_snapshot(node1, 'ks', 'cf', 'basic')
@@ -323,14 +332,14 @@ class TestArchiveCommitlog(SnapshotTester):
 
         try:
             # Write more data:
-            debug("Writing second 30,000 rows...")
+            logger.debug("Writing second 30,000 rows...")
             self.insert_rows(session, 30000, 60000)
             node1.flush()
             time.sleep(10)
             # Record when this second set of inserts finished:
             insert_cutoff_times.append(time.gmtime())
 
-            debug("Writing final 5,000 rows...")
+            logger.debug("Writing final 5,000 rows...")
             self.insert_rows(session, 60000, 65000)
             # Record when the third set of inserts finished:
             insert_cutoff_times.append(time.gmtime())
@@ -340,17 +349,16 @@ class TestArchiveCommitlog(SnapshotTester):
 
             rows = session.execute('SELECT count(*) from ks.cf')
             # Make sure we have the same amount of rows as when we snapshotted:
-            self.assertEqual(rows[0][0], 65000)
+            assert rows[0][0] == 65000
 
             # Check that there is at least one commit log backed up that
             # is not one of the active commit logs:
             commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
-            debug("node1 commitlog dir: " + commitlog_dir)
-            debug("node1 commitlog dir contents: " + str(os.listdir(commitlog_dir)))
-            debug("tmp_commitlog contents: " + str(os.listdir(tmp_commitlog)))
+            logger.debug("node1 commitlog dir: " + commitlog_dir)
+            logger.debug("node1 commitlog dir contents: " + str(os.listdir(commitlog_dir)))
+            logger.debug("tmp_commitlog contents: " + str(os.listdir(tmp_commitlog)))
 
-            self.assertNotEqual(set(os.listdir(tmp_commitlog)) - set(os.listdir(commitlog_dir)),
-                                set())
+            assert_directory_not_empty(tmp_commitlog, commitlog_dir)
 
             cluster.flush()
             cluster.compact()
@@ -358,15 +366,16 @@ class TestArchiveCommitlog(SnapshotTester):
 
             # Destroy the cluster
             cluster.stop()
-            debug("node1 commitlog dir contents after stopping: " + str(os.listdir(commitlog_dir)))
-            debug("tmp_commitlog contents after stopping: " + str(os.listdir(tmp_commitlog)))
+            logger.debug("node1 commitlog dir contents after stopping: " + str(os.listdir(commitlog_dir)))
+            logger.debug("tmp_commitlog contents after stopping: " + str(os.listdir(tmp_commitlog)))
 
-            self.copy_logs(self.cluster, name=self.id().split(".")[0] + "_pre-restore")
-            cleanup_cluster(self.cluster, self.test_path)
-            self.test_path = get_test_path()
-            cluster = self.cluster = create_ccm_cluster(self.test_path, name='test')
+            self.copy_logs(name=get_current_test_name() + "_pre-restore")
+            self.fixture_dtest_setup.cleanup_and_replace_cluster()
+            cluster = self.cluster
             cluster.populate(1)
-            node1, = cluster.nodelist()
+            nodes = cluster.nodelist()
+            assert len(nodes) == 1
+            node1 = nodes[0]
 
             # Restore schema from snapshots:
             for system_ks_snapshot_dir in system_ks_snapshot_dirs:
@@ -400,7 +409,7 @@ class TestArchiveCommitlog(SnapshotTester):
 
             rows = session.execute('SELECT count(*) from ks.cf')
             # Make sure we have the same amount of rows as when we snapshotted:
-            self.assertEqual(rows[0][0], 30000)
+            assert rows[0][0] == 30000
 
             # Edit commitlog_archiving.properties. Remove the archive
             # command  and set a restore command and restore_directories:
@@ -416,7 +425,7 @@ class TestArchiveCommitlog(SnapshotTester):
                     replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
                                     [(r'^restore_point_in_time=.*$', 'restore_point_in_time={restore_time}'.format(restore_time=restore_time))])
 
-            debug("Restarting node1..")
+            logger.debug("Restarting node1..")
             node1.stop()
             node1.start(wait_for_binary_proto=True)
 
@@ -428,31 +437,31 @@ class TestArchiveCommitlog(SnapshotTester):
             # Now we should have 30000 rows from the snapshot + 30000 rows
             # from the commitlog backups:
             if not restore_archived_commitlog:
-                self.assertEqual(rows[0][0], 30000)
+                assert rows[0][0] == 30000
             elif restore_point_in_time:
-                self.assertEqual(rows[0][0], 60000)
+                assert rows[0][0] == 60000
             else:
-                self.assertEqual(rows[0][0], 65000)
+                assert rows[0][0] == 65000
 
         finally:
             # clean up
-            debug("removing snapshot_dir: " + ",".join(snapshot_dirs))
+            logger.debug("removing snapshot_dir: " + ",".join(snapshot_dirs))
             for snapshot_dir in snapshot_dirs:
                 shutil.rmtree(snapshot_dir)
-            debug("removing snapshot_dir: " + ",".join(system_ks_snapshot_dirs))
+            logger.debug("removing snapshot_dir: " + ",".join(system_ks_snapshot_dirs))
             for system_ks_snapshot_dir in system_ks_snapshot_dirs:
                 shutil.rmtree(system_ks_snapshot_dir)
-            debug("removing snapshot_dir: " + ",".join(system_cfs_snapshot_dirs))
+            logger.debug("removing snapshot_dir: " + ",".join(system_cfs_snapshot_dirs))
             for system_cfs_snapshot_dir in system_cfs_snapshot_dirs:
                 shutil.rmtree(system_cfs_snapshot_dir)
-            debug("removing snapshot_dir: " + ",".join(system_ut_snapshot_dirs))
+            logger.debug("removing snapshot_dir: " + ",".join(system_ut_snapshot_dirs))
             for system_ut_snapshot_dir in system_ut_snapshot_dirs:
                 shutil.rmtree(system_ut_snapshot_dir)
-            debug("removing snapshot_dir: " + ",".join(system_col_snapshot_dirs))
+            logger.debug("removing snapshot_dir: " + ",".join(system_col_snapshot_dirs))
             for system_col_snapshot_dir in system_col_snapshot_dirs:
                 shutil.rmtree(system_col_snapshot_dir)
 
-            debug("removing tmp_commitlog: " + tmp_commitlog)
+            logger.debug("removing tmp_commitlog: " + tmp_commitlog)
             shutil.rmtree(tmp_commitlog)
 
     def test_archive_and_restore_commitlog_repeatedly(self):
@@ -461,14 +470,13 @@ class TestArchiveCommitlog(SnapshotTester):
         Run archive commit log restoration test repeatedly to make sure it is idempotent
         and doesn't fail if done repeatedly
         """
-
         cluster = self.cluster
         cluster.populate(1)
         node1 = cluster.nodelist()[0]
 
         # Create a temp directory for storing commitlog archives:
         tmp_commitlog = safe_mkdtemp()
-        debug("tmp_commitlog: {}".format(tmp_commitlog))
+        logger.debug("tmp_commitlog: {}".format(tmp_commitlog))
 
         # Edit commitlog_archiving.properties and set an archive
         # command:
@@ -481,32 +489,31 @@ class TestArchiveCommitlog(SnapshotTester):
 
         cluster.start(wait_for_binary_proto=True)
 
-        debug("Creating initial connection")
+        logger.debug("Creating initial connection")
         session = self.patient_cql_connection(node1)
         create_ks(session, 'ks', 1)
         session.execute('CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')
-        debug("Writing 30,000 rows...")
+        logger.debug("Writing 30,000 rows...")
         self.insert_rows(session, 0, 60000)
 
         try:
             # Check that there is at least one commit log backed up that
             # is not one of the active commit logs:
             commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
-            debug("node1 commitlog dir: " + commitlog_dir)
+            logger.debug("node1 commitlog dir: " + commitlog_dir)
 
             cluster.flush()
 
-            self.assertNotEqual(set(os.listdir(tmp_commitlog)) - set(os.listdir(commitlog_dir)),
-                                set())
+            assert_directory_not_empty(tmp_commitlog, commitlog_dir)
 
-            debug("Flushing and doing first restart")
+            logger.debug("Flushing and doing first restart")
             cluster.compact()
             node1.drain()
             # restart the node which causes the active commitlogs to be archived
             node1.stop()
             node1.start(wait_for_binary_proto=True)
 
-            debug("Stopping and second restart")
+            logger.debug("Stopping and second restart")
             node1.stop()
             node1.start(wait_for_binary_proto=True)
 
@@ -514,7 +521,14 @@ class TestArchiveCommitlog(SnapshotTester):
             session = self.patient_cql_connection(node1)
 
             rows = session.execute('SELECT count(*) from ks.cf')
-            self.assertEqual(rows[0][0], 60000)
+            assert rows[0][0] == 60000
         finally:
-            debug("removing tmp_commitlog: " + tmp_commitlog)
+            logger.debug("removing tmp_commitlog: " + tmp_commitlog)
             shutil.rmtree(tmp_commitlog)
+
+
+def assert_directory_not_empty(tmp_commitlog, commitlog_dir):
+    tmp_commitlog_ret = set(os.listdir(tmp_commitlog))
+    for commitlog_file in os.listdir(commitlog_dir):
+        tmp_commitlog_ret.discard(commitlog_file)
+    assert len(tmp_commitlog_ret) != 0

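The helper expresses the same invariant the removed inline assertions checked with a set difference: the archive directory holds at least one commitlog that is not among the node's active commitlogs. For comparison, the original inline form:

    assert set(os.listdir(tmp_commitlog)) - set(os.listdir(commitlog_dir)) != set()
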
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/snitch_test.py
----------------------------------------------------------------------
diff --git a/snitch_test.py b/snitch_test.py
index 334a1a1..164c81e 100644
--- a/snitch_test.py
+++ b/snitch_test.py
@@ -1,16 +1,17 @@
 import os
 import socket
 import time
-
-from nose.plugins.attrib import attr
+import pytest
+import logging
 
 from cassandra import ConsistencyLevel
-from dtest import Tester, debug
-from nose.tools import assert_true, assert_equal, assert_greater_equal
-from tools.decorators import since
+from dtest import Tester
 from tools.jmxutils import (JolokiaAgent, make_mbean,
                             remove_perf_disable_shared_mem)
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
 
 @since('2.2.5')
 class TestGossipingPropertyFileSnitch(Tester):
@@ -103,21 +104,21 @@ class TestGossipingPropertyFileSnitch(Tester):
         # read data from node2 just to make sure data and connectivity is OK
         session = self.patient_exclusive_cql_connection(node2)
         new_rows = list(session.execute("SELECT * FROM {}".format(stress_table)))
-        self.assertEquals(original_rows, new_rows)
+        assert original_rows == new_rows
 
         out, err, _ = node1.nodetool('gossipinfo')
-        self.assertEqual(0, len(err), err)
-        debug(out)
+        assert 0 == len(err), err
+        logger.debug(out)
 
-        self.assertIn("/{}".format(NODE1_BROADCAST_ADDRESS), out)
-        self.assertIn("INTERNAL_IP:{}:{}".format('9' if running40 else '6', NODE1_LISTEN_ADDRESS), out)
-        self.assertIn("INTERNAL_ADDRESS_AND_PORT:7:{}".format(NODE1_40_LISTEN_ADDRESS), out)
-        self.assertIn("/{}".format(NODE2_BROADCAST_ADDRESS), out)
-        self.assertIn("INTERNAL_IP:{}:{}".format('9' if running40 else '6', NODE2_LISTEN_ADDRESS), out)
-        self.assertIn("INTERNAL_ADDRESS_AND_PORT:7:{}".format(NODE1_40_LISTEN_ADDRESS), out)
+        assert "/{}".format(NODE1_BROADCAST_ADDRESS) in out
+        assert "INTERNAL_IP:{}:{}".format('9' if running40 else '6', NODE1_LISTEN_ADDRESS) in out
+        assert "INTERNAL_ADDRESS_AND_PORT:7:{}".format(NODE1_40_LISTEN_ADDRESS) in out
+        assert "/{}".format(NODE2_BROADCAST_ADDRESS) in out
+        assert "INTERNAL_IP:{}:{}".format('9' if running40 else '6', NODE2_LISTEN_ADDRESS) in out
+        assert "INTERNAL_ADDRESS_AND_PORT:7:{}".format(NODE1_40_LISTEN_ADDRESS) in out
 
 class TestDynamicEndpointSnitch(Tester):
-    @attr('resource-intensive')
+    @pytest.mark.resource_intensive
     @since('3.10')
     def test_multidatacenter_local_quorum(self):
         '''
@@ -175,20 +176,18 @@ class TestDynamicEndpointSnitch(Tester):
                 for x in range(0, 300):
                     degraded_reads_before = bad_jmx.read_attribute(read_stage, 'Value')
                     scores_before = jmx.read_attribute(des, 'Scores')
-                    assert_true(no_cross_dc(scores_before, [node4, node5, node6]),
-                                "Cross DC scores were present: " + str(scores_before))
+                    assert no_cross_dc(scores_before, [node4, node5, node6]), "Cross DC scores were present: " + str(scores_before)
                     future = session.execute_async(read_stmt, [x])
                     future.result()
                     scores_after = jmx.read_attribute(des, 'Scores')
-                    assert_true(no_cross_dc(scores_after, [node4, node5, node6]),
-                                "Cross DC scores were present: " + str(scores_after))
+                    assert no_cross_dc(scores_after, [node4, node5, node6]), "Cross DC scores were present: " + str(scores_after)
 
                     if snitchable(scores_before, scores_after,
                                   [coordinator_node, healthy_node, degraded_node]):
                         snitchable_count = snitchable_count + 1
                         # If the DES correctly routed the read around the degraded node,
                         # it shouldn't have another completed read request in metrics
-                        assert_equal(degraded_reads_before,
+                        assert (degraded_reads_before ==
                                      bad_jmx.read_attribute(read_stage, 'Value'))
                     else:
                         # sleep to give dynamic snitch time to recalculate scores
@@ -196,4 +195,4 @@ class TestDynamicEndpointSnitch(Tester):
 
                 # check that most reads were snitchable, with some
                 # room allowed in case score recalculation is slow
-                assert_greater_equal(snitchable_count, 250)
+                assert snitchable_count >= 250

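The dynamic snitch assertions above poll MBean attributes over Jolokia. A minimal sketch of the read pattern, assuming JolokiaAgent exposes start()/stop()/read_attribute() as used elsewhere in the dtests; the mbean path is illustrative:

    from tools.jmxutils import JolokiaAgent, make_mbean

    des = make_mbean('db', 'DynamicEndpointSnitch')   # illustrative mbean path
    jmx = JolokiaAgent(node1)
    jmx.start()
    try:
        scores = jmx.read_attribute(des, 'Scores')    # per-endpoint latency scores
    finally:
        jmx.stop()
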
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/sslnodetonode_test.py
----------------------------------------------------------------------
diff --git a/sslnodetonode_test.py b/sslnodetonode_test.py
index 4c4a188..c34fa11 100644
--- a/sslnodetonode_test.py
+++ b/sslnodetonode_test.py
@@ -2,10 +2,14 @@ import os
 import os.path
 import shutil
 import time
+import pytest
+import logging
 
 from dtest import Tester
 from tools import sslkeygen
-from tools.decorators import since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 # as the error message logged will be different per netty ssl implementation (jdk vs openssl (libre vs boring vs ...)),
 # the best we can do is just look for an SSLHandshakeException
@@ -16,9 +20,8 @@ _LOG_ERR_GENERAL = "javax.net.ssl.SSLException"
 @since('3.6')
 class TestNodeToNodeSSLEncryption(Tester):
 
-    def ssl_enabled_test(self):
+    def test_ssl_enabled(self):
         """Should be able to start with valid ssl options"""
-
         credNode1 = sslkeygen.generate_credentials("127.0.0.1")
         credNode2 = sslkeygen.generate_credentials("127.0.0.2", credNode1.cakeystore, credNode1.cacert)
 
@@ -26,21 +29,19 @@ class TestNodeToNodeSSLEncryption(Tester):
         self.cluster.start()
         self.cql_connection(self.node1)
 
-    def ssl_correct_hostname_with_validation_test(self):
+    def test_ssl_correct_hostname_with_validation(self):
         """Should be able to start with valid ssl options"""
-
         credNode1 = sslkeygen.generate_credentials("127.0.0.1")
         credNode2 = sslkeygen.generate_credentials("127.0.0.2", credNode1.cakeystore, credNode1.cacert)
 
         self.setup_nodes(credNode1, credNode2, endpoint_verification=True)
-        self.allow_log_errors = False
+        self.fixture_dtest_setup.allow_log_errors = False
         self.cluster.start()
         time.sleep(2)
         self.cql_connection(self.node1)
 
-    def ssl_wrong_hostname_no_validation_test(self):
+    def test_ssl_wrong_hostname_no_validation(self):
         """Should be able to start with valid ssl options"""
-
         credNode1 = sslkeygen.generate_credentials("127.0.0.80")
         credNode2 = sslkeygen.generate_credentials("127.0.0.81", credNode1.cakeystore, credNode1.cacert)
 
@@ -49,49 +50,46 @@ class TestNodeToNodeSSLEncryption(Tester):
         time.sleep(2)
         self.cql_connection(self.node1)
 
-    def ssl_wrong_hostname_with_validation_test(self):
+    def test_ssl_wrong_hostname_with_validation(self):
         """Should be able to start with valid ssl options"""
-
         credNode1 = sslkeygen.generate_credentials("127.0.0.80")
         credNode2 = sslkeygen.generate_credentials("127.0.0.81", credNode1.cakeystore, credNode1.cacert)
 
         self.setup_nodes(credNode1, credNode2, endpoint_verification=True)
 
-        self.allow_log_errors = True
+        self.fixture_dtest_setup.allow_log_errors = True
         self.cluster.start(no_wait=True)
 
         found = self._grep_msg(self.node1, _LOG_ERR_HANDSHAKE, _LOG_ERR_GENERAL)
-        self.assertTrue(found)
+        assert found
 
         found = self._grep_msg(self.node2, _LOG_ERR_HANDSHAKE, _LOG_ERR_GENERAL)
-        self.assertTrue(found)
+        assert found
 
         self.cluster.stop()
 
-    def ssl_client_auth_required_fail_test(self):
+    def test_ssl_client_auth_required_fail(self):
         """peers need to perform mutual auth (cient auth required), but do not supply the local cert"""
-
         credNode1 = sslkeygen.generate_credentials("127.0.0.1")
         credNode2 = sslkeygen.generate_credentials("127.0.0.2")
 
         self.setup_nodes(credNode1, credNode2, client_auth=True)
 
-        self.allow_log_errors = True
+        self.fixture_dtest_setup.allow_log_errors = True
         self.cluster.start(no_wait=True)
         time.sleep(2)
 
         found = self._grep_msg(self.node1, _LOG_ERR_HANDSHAKE, _LOG_ERR_GENERAL)
-        self.assertTrue(found)
+        assert found
 
         found = self._grep_msg(self.node2, _LOG_ERR_HANDSHAKE, _LOG_ERR_GENERAL)
-        self.assertTrue(found)
+        assert found
 
         self.cluster.stop()
-        self.assertTrue(found)
+        assert found
 
-    def ssl_client_auth_required_succeed_test(self):
+    def test_ssl_client_auth_required_succeed(self):
         """peers need to perform mutual auth (cient auth required), but do not supply the loca cert"""
-
         credNode1 = sslkeygen.generate_credentials("127.0.0.1")
         credNode2 = sslkeygen.generate_credentials("127.0.0.2", credNode1.cakeystore, credNode1.cacert)
         sslkeygen.import_cert(credNode1.basedir, 'ca127.0.0.2', credNode2.cacert, credNode1.cakeystore)
@@ -102,23 +100,22 @@ class TestNodeToNodeSSLEncryption(Tester):
         self.cluster.start()
         self.cql_connection(self.node1)
 
-    def ca_mismatch_test(self):
+    def test_ca_mismatch(self):
         """CA mismatch should cause nodes to fail to connect"""
-
         credNode1 = sslkeygen.generate_credentials("127.0.0.1")
         credNode2 = sslkeygen.generate_credentials("127.0.0.2")  # mismatching CA!
 
         self.setup_nodes(credNode1, credNode2)
 
-        self.allow_log_errors = True
+        self.fixture_dtest_setup.allow_log_errors = True
         self.cluster.start(no_wait=True)
 
         found = self._grep_msg(self.node1, _LOG_ERR_HANDSHAKE)
         self.cluster.stop()
-        self.assertTrue(found)
+        assert found
 
     @since('4.0')
-    def optional_outbound_tls_test(self):
+    def test_optional_outbound_tls(self):
         """listen on TLS port, but optionally connect using TLS. this supports the upgrade case of starting with a non-encrypted cluster and then upgrading each node to use encryption.
 
         @jira_ticket CASSANDRA-10404


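The wholesale method renames in this file (ssl_enabled_test -> test_ssl_enabled, and so on) follow from pytest's default collection, which only matches the test_ prefix, whereas nose's pattern also matched the _test suffix. A minimal sketch of the rule:

    class TestNaming(Tester):

        def ssl_enabled_test(self):    # matched by nose, silently skipped by pytest
            pass

        def test_ssl_enabled(self):    # matched by both collectors
            pass
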
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@cassandra.apache.org
For additional commands, e-mail: commits-help@cassandra.apache.org