Posted to commits@cassandra.apache.org by mc...@apache.org on 2021/01/29 12:41:43 UTC

[cassandra-dtest] branch trunk updated (ec84618 -> ebb59dd)

This is an automated email from the ASF dual-hosted git repository.

mck pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/cassandra-dtest.git.


    from ec84618  Listing tests (--collect-only and --dtest-print-tests-only) only lists tests that will run according to other arguments specified
     new 2a721e7  Handle NodeError as it may be raised from ccm
     new ebb59dd  Improve flake8

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 commitlog_test.py                  |  18 +++---
 transient_replication_ring_test.py | 110 +++++++++++++++++--------------------
 2 files changed, 57 insertions(+), 71 deletions(-)


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@cassandra.apache.org
For additional commands, e-mail: commits-help@cassandra.apache.org


[cassandra-dtest] 01/02: Handle NodeError as it may be raised from ccm

Posted by mc...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

mck pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/cassandra-dtest.git

commit 2a721e7b058025f42f8b86c8a2ecd851c2790f00
Author: Tomek Lasica <to...@datastax.com>
AuthorDate: Tue Jan 26 16:29:20 2021 +0100

    Handle NodeError as it may be raised from ccm
    
    patch by Tomek Łasica; reviewed by Mick Semb Wever for CASSANDRA-16405
---
 commitlog_test.py | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/commitlog_test.py b/commitlog_test.py
index b0c769f..6ff11cd 100644
--- a/commitlog_test.py
+++ b/commitlog_test.py
@@ -11,7 +11,7 @@ import logging
 from cassandra import WriteTimeout
 from cassandra.cluster import NoHostAvailable, OperationTimedOut
 from ccmlib.common import is_win
-from ccmlib.node import Node, TimeoutError
+from ccmlib.node import Node, TimeoutError, NodeError
 from parse import parse
 
 from dtest import Tester, create_ks
@@ -43,16 +43,14 @@ class TestCommitLog(Tester):
         # so this changes them back so we can delete them.
         self._change_commitlog_perms(stat.S_IWRITE | stat.S_IREAD | stat.S_IEXEC)
 
-
     def prepare(self, configuration=None, create_test_keyspace=True, **kwargs):
         if configuration is None:
             configuration = {}
         default_conf = {'commitlog_sync_period_in_ms': 1000}
 
         set_conf = dict(default_conf, **configuration)
-        logger.debug('setting commitlog configuration with the following values: '
-              '{set_conf} and the following kwargs: {kwargs}'.format(
-                  set_conf=set_conf, kwargs=kwargs))
+        logger.debug('setting commitlog configuration with the following values: {set_conf}'
+                     ' and the following kwargs: {kwargs}'.format(set_conf=set_conf, kwargs=kwargs))
         self.cluster.set_configuration_options(values=set_conf, **kwargs)
         self.cluster.start()
         self.session1 = self.patient_cql_connection(self.node1)
@@ -76,10 +74,10 @@ class TestCommitLog(Tester):
 
             if commitlogs:
                 logger.debug('changing permissions to {perms} on the following files:'
-                      '\n  {files}'.format(perms=oct(mod), files='\n  '.join(commitlogs)))
+                             '\n  {files}'.format(perms=oct(mod), files='\n  '.join(commitlogs)))
             else:
                 logger.debug(self._change_commitlog_perms.__name__ + ' called on empty commitlog directory '
-                      '{path} with permissions {perms}'.format(path=path, perms=oct(mod)))
+                                                                     '{path} with permissions {perms}'.format(path=path, perms=oct(mod)))
 
             for commitlog in commitlogs:
                 os.chmod(commitlog, mod)
@@ -503,7 +501,7 @@ class TestCommitLog(Tester):
         mark = node.mark_log()
         node.start()
         node.watch_log_for(expected_error, from_mark=mark)
-        with pytest.raises(TimeoutError):
+        with pytest.raises((TimeoutError, NodeError)):
             node.wait_for_binary_interface(from_mark=mark, timeout=20)
         assert not node.is_running()
 
@@ -620,10 +618,10 @@ class TestCommitLog(Tester):
                 # "Changed in version 2.6: The return value is in the range [-2**31, 2**31-1] regardless
                 # of platform. In the past the value would be signed on some platforms and unsigned on
                 # others. Use & 0xffffffff on the value if you want it to match Python 3 behavior."
-                assert (crc & 0xffffffff)  == get_header_crc(header_bytes)
+                assert (crc & 0xffffffff) == get_header_crc(header_bytes)
 
         mark = node.mark_log()
         node.start()
         node.watch_log_for(expected_error, from_mark=mark)
-        with pytest.raises(TimeoutError):
+        with pytest.raises((TimeoutError, NodeError)):
             node.wait_for_binary_interface(from_mark=mark, timeout=20)
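
The pytest.raises context manager accepts a tuple of exception types, so the assertions above
now pass whether ccm signals the failed startup with a TimeoutError or a NodeError. A minimal
standalone sketch of the pattern (the exception classes and wait_for_binary_interface below are
stand-ins for illustration, not the real ccmlib/dtest objects):

    import pytest


    class TimeoutError(Exception):
        """Stand-in for ccmlib.node.TimeoutError."""


    class NodeError(Exception):
        """Stand-in for ccmlib.node.NodeError."""


    def wait_for_binary_interface():
        # Hypothetical: a node that fails to start may surface the failure as a
        # NodeError rather than timing out, so either exception is acceptable.
        raise NodeError("node failed to start")


    def test_node_fails_to_start():
        # Passing a tuple of exception types makes the check succeed for either one.
        with pytest.raises((TimeoutError, NodeError)):
            wait_for_binary_interface()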


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@cassandra.apache.org
For additional commands, e-mail: commits-help@cassandra.apache.org


[cassandra-dtest] 02/02: Improve flake8

Posted by mc...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

mck pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/cassandra-dtest.git

commit ebb59dd7cf0c4fb4a9d9135cf6442e59c109de31
Author: Tomek Lasica <to...@datastax.com>
AuthorDate: Tue Jan 26 20:40:07 2021 +0100

    Improve flake8
    
    patch by Tomek Łasica; reviewed by Mick Semb Wever
---
 transient_replication_ring_test.py | 110 +++++++++++++++++--------------------
 1 file changed, 49 insertions(+), 61 deletions(-)

diff --git a/transient_replication_ring_test.py b/transient_replication_ring_test.py
index e9ccfa1..32a595a 100644
--- a/transient_replication_ring_test.py
+++ b/transient_replication_ring_test.py
@@ -21,6 +21,7 @@ logging.getLogger('cassandra').setLevel(logging.CRITICAL)
 
 NODELOCAL = 11
 
+
 def jmx_start(to_start, **kwargs):
     kwargs['jvm_args'] = kwargs.get('jvm_args', []) + ['-XX:-PerfDisableSharedMem']
     to_start.start(**kwargs)
@@ -34,10 +35,12 @@ def repair_nodes(nodes):
     for node in nodes:
         node.nodetool('repair -pr')
 
+
 def cleanup_nodes(nodes):
     for node in nodes:
         node.nodetool('cleanup')
 
+
 def patch_start(startable):
     old_start = startable.start
 
@@ -67,8 +70,10 @@ class TestTransientReplicationRing(Tester):
     def point_select_statement(self):
         return SimpleStatement(self.point_select(), consistency_level=NODELOCAL)
 
-    def check_expected(self, sessions, expected, node=[i for i in range(0,1000)], cleanup=False):
+    def check_expected(self, sessions, expected, node=None, cleanup=False):
         """Check that each node has the expected values present"""
+        if node is None:
+            node = list(range(1000))
         for idx, session, expect, node in zip(range(0, 1000), sessions, expected, node):
             print("Checking idx " + str(idx))
             print(str([row for row in session.execute(self.select_statement())]))
@@ -84,10 +89,10 @@ class TestTransientReplicationRing(Tester):
         for i in range(0, 40):
             count = 0
             for session in sessions:
-                for row in session.execute(self.point_select_statement(), ["%05d" % i]):
+                for _ in session.execute(self.point_select_statement(), ["%05d" % i]):
                     count += 1
             if exactly:
-                assert count == exactly, "Wrong replication for %05d should be exactly" % (i, exactly)
+                assert count == exactly, "Wrong replication for %05d should be exactly %d" % (i, exactly)
             if gte:
                 assert count >= gte, "Count for %05d should be >= %d" % (i, gte)
             if lte:
@@ -106,7 +111,7 @@ class TestTransientReplicationRing(Tester):
                                                        'num_tokens': 1,
                                                        'commitlog_sync_period_in_ms': 500,
                                                        'enable_transient_replication': True,
-                                                       'partitioner' : 'org.apache.cassandra.dht.OrderPreservingPartitioner'})
+                                                       'partitioner': 'org.apache.cassandra.dht.OrderPreservingPartitioner'})
         print("CLUSTER INSTALL DIR: ")
         print(self.cluster.get_install_dir())
         self.cluster.populate(3, tokens=self.tokens, debug=True, install_byteman=True)
@@ -131,17 +136,17 @@ class TestTransientReplicationRing(Tester):
         print("CREATE KEYSPACE %s WITH REPLICATION={%s}" % (self.keyspace, replication_params))
         session.execute(
             "CREATE TABLE %s.%s (pk varchar, ck int, value int, PRIMARY KEY (pk, ck)) WITH speculative_retry = 'NEVER' AND additional_write_policy = 'NEVER' AND read_repair = 'NONE'" % (
-            self.keyspace, self.table))
+                self.keyspace, self.table))
 
     def quorum(self, session, stmt_str):
         return session.execute(SimpleStatement(stmt_str, consistency_level=ConsistencyLevel.QUORUM))
 
     def insert_row(self, pk, ck, value, session=None, node=None):
         session = session or self.exclusive_cql_connection(node or self.node1)
-        #token = BytesToken.from_key(pack('>i', pk)).value
-        #assert token < BytesToken.from_string(self.tokens[0]).value or BytesToken.from_string(self.tokens[-1]).value < token   # primary replica should be node1
-        #TODO Is quorum really right? I mean maybe we want ALL with retries since we really don't want to the data
-        #not at a replica unless it is intentional
+        # token = BytesToken.from_key(pack('>i', pk)).value
+        # assert token < BytesToken.from_string(self.tokens[0]).value or BytesToken.from_string(self.tokens[-1]).value < token   # primary replica should be node1
+        # TODO Is quorum really right? I mean maybe we want ALL with retries since we really don't want to the data
+        # not at a replica unless it is intentional
         self.quorum(session, "INSERT INTO %s.%s (pk, ck, value) VALUES ('%05d', %s, %s)" % (self.keyspace, self.table, pk, ck, value))
 
     @flaky(max_runs=1)
@@ -161,13 +166,13 @@ class TestTransientReplicationRing(Tester):
                     gen_expected(range(12, 31, 2))]
         self.check_expected(sessions, expected)
 
-        #Make sure at least a little data is repaired, this shouldn't move data anywhere
+        # Make sure at least a little data is repaired, this shouldn't move data anywhere
         repair_nodes(nodes)
 
         self.check_expected(sessions, expected)
 
-        #Ensure that there is at least some transient data around, because of this if it's missing after bootstrap
-        #We know we failed to get it from the transient replica losing the range entirely
+        # Ensure that there is at least some transient data around, because of this if it's missing after bootstrap
+        # We know we failed to get it from the transient replica losing the range entirely
         nodes[1].stop(wait_other_notice=True)
 
         for i in range(1, 40, 2):
@@ -181,7 +186,7 @@ class TestTransientReplicationRing(Tester):
                     gen_expected(range(0, 21, 2), range(32, 40, 2)),
                     gen_expected(range(1, 11, 2), range(11, 31), range(31, 40, 2))]
 
-        #Every node should have some of its fully replicated data and one and two should have some transient data
+        # Every node should have some of its fully replicated data and one and two should have some transient data
         self.check_expected(sessions, expected)
 
         node4 = new_node(self.cluster, bootstrap=True, token='00040')
@@ -192,21 +197,21 @@ class TestTransientReplicationRing(Tester):
         expected.append(gen_expected(range(11, 20, 2), range(21, 40)))
         sessions.append(self.exclusive_cql_connection(node4))
 
-        #Because repair was never run and nodes had transient data it will have data for transient ranges (node1, 11-20)
+        # Because repair was never run and nodes had transient data it will have data for transient ranges (node1, 11-20)
         assert_all(sessions[3],
                    self.select(),
                    expected[3],
                    cl=NODELOCAL)
 
-        #Node1 no longer transiently replicates 11-20, so cleanup will clean it up
-        #Node1 also now transiently replicates 21-30 and half the values in that range were repaired
+        # Node1 no longer transiently replicates 11-20, so cleanup will clean it up
+        # Node1 also now transiently replicates 21-30 and half the values in that range were repaired
         expected[0] = gen_expected(range(0, 11), range(21, 30, 2), range(31, 40))
-        #Node2 still missing data since it was down during some insertions, it also lost some range (31-40)
+        # Node2 still missing data since it was down during some insertions, it also lost some range (31-40)
         expected[1] = gen_expected(range(0, 21, 2))
         expected[2] = gen_expected(range(1, 11, 2), range(11, 31))
 
-        #Cleanup should only impact if a node lost a range entirely or started to transiently replicate it and the data
-        #was repaired
+        # Cleanup should only impact if a node lost a range entirely or started to transiently replicate it and the data
+        # was repaired
         self.check_expected(sessions, expected, nodes, cleanup=True)
 
         repair_nodes(nodes)
@@ -218,7 +223,7 @@ class TestTransientReplicationRing(Tester):
 
         self.check_expected(sessions, expected, nodes, cleanup=True)
 
-        #Every value should be replicated exactly 2 times
+        # Every value should be replicated exactly 2 times
         self.check_replication(sessions, exactly=2)
 
     @flaky(max_runs=1)
@@ -267,7 +272,6 @@ class TestTransientReplicationRing(Tester):
         self.check_expected(sessions, expected_after_repair, nodes, cleanup=True)
         self.check_replication(sessions, exactly=2)
 
-
     @flaky(max_runs=1)
     @pytest.mark.no_vnodes
     def test_move_forwards_between_and_cleanup(self):
@@ -275,7 +279,7 @@ class TestTransientReplicationRing(Tester):
         move_token = '00025'
         expected_after_move = [gen_expected(range(0, 26), range(31, 40, 2)),
                                gen_expected(range(0, 21, 2), range(31, 40)),
-                               gen_expected(range(1, 11, 2), range(11, 21, 2), range(21,31)),
+                               gen_expected(range(1, 11, 2), range(11, 21, 2), range(21, 31)),
                                gen_expected(range(21, 26, 2), range(26, 40))]
         expected_after_repair = [gen_expected(range(0, 26)),
                                  gen_expected(range(0, 21), range(31, 40)),
@@ -283,7 +287,6 @@ class TestTransientReplicationRing(Tester):
                                  gen_expected(range(26, 40))]
         self.move_test(move_token, expected_after_move, expected_after_repair)
 
-
     @flaky(max_runs=1)
     @pytest.mark.no_vnodes
     def test_move_forwards_and_cleanup(self):
@@ -299,7 +302,6 @@ class TestTransientReplicationRing(Tester):
                                  gen_expected(range(21, 40))]
         self.move_test(move_token, expected_after_move, expected_after_repair)
 
-
     @flaky(max_runs=1)
     @pytest.mark.no_vnodes
     def test_move_backwards_between_and_cleanup(self):
@@ -315,7 +317,6 @@ class TestTransientReplicationRing(Tester):
                                  gen_expected(range(31, 40))]
         self.move_test(move_token, expected_after_move, expected_after_repair)
 
-
     @flaky(max_runs=1)
     @pytest.mark.no_vnodes
     def test_move_backwards_and_cleanup(self):
@@ -331,7 +332,6 @@ class TestTransientReplicationRing(Tester):
                                  gen_expected(range(21, 40))]
         self.move_test(move_token, expected_after_move, expected_after_repair)
 
-
     @flaky(max_runs=1)
     @pytest.mark.no_vnodes
     def test_decommission(self):
@@ -367,7 +367,7 @@ class TestTransientReplicationRing(Tester):
 
         self.check_expected(sessions, expected)
 
-        #node1 has transient data we want to see streamed out on move
+        # node1 has transient data we want to see streamed out on move
         nodes[3].nodetool('decommission')
 
         nodes = nodes[:-1]
@@ -384,7 +384,7 @@ class TestTransientReplicationRing(Tester):
 
         repair_nodes(nodes)
 
-        #There should be no transient data anywhere
+        # There should be no transient data anywhere
         expected = [gen_expected(range(0, 11), range(21, 40)),
                     gen_expected(range(0, 21), range(31, 40)),
                     gen_expected(range(11, 31))]
@@ -392,7 +392,6 @@ class TestTransientReplicationRing(Tester):
         self.check_expected(sessions, expected, nodes, cleanup=True)
         self.check_replication(sessions, exactly=2)
 
-
     @flaky(max_runs=1)
     @pytest.mark.no_vnodes
     def test_remove(self):
@@ -403,13 +402,12 @@ class TestTransientReplicationRing(Tester):
         main_session = self.patient_cql_connection(self.node1)
         nodes = [self.node1, self.node2, self.node3]
 
-        #We want the node being removed to have no data on it so nodetool remove always gets all the necessary data
-        #from survivors
+        # We want the node being removed to have no data on it
+        # so nodetool remove always gets all the necessary data from survivors
         node4_id = node4.nodetool('info').stdout[25:61]
         node4.stop(wait_other_notice=True)
 
         for i in range(0, 40):
-            print("Inserting " + str(i))
             self.insert_row(i, i, i, main_session)
 
         sessions = [self.exclusive_cql_connection(node) for node in [self.node1, self.node2, self.node3]]
@@ -423,45 +421,34 @@ class TestTransientReplicationRing(Tester):
 
         nodes[0].nodetool('removenode ' + node4_id)
 
-        #Give streaming time to occur, it's asynchronous from removenode completing at other ndoes
+        # Give streaming time to occur, it's asynchronous from removenode completing at other nodes
         import time
         time.sleep(15)
 
-        # Everyone should have everything except
-        expected = [gen_expected(range(0, 40)),
-                    gen_expected(range(0, 40)),
-                    gen_expected(range(0,40))]
+        self._everyone_should_have_everything(sessions)
 
-        self.check_replication(sessions, exactly=3)
-        self.check_expected(sessions, expected)
         repair_nodes(nodes)
         cleanup_nodes(nodes)
 
-        self.check_replication(sessions, exactly=2)
-
-        expected = [gen_expected(range(0,11), range(21,40)),
-                    gen_expected(range(0,21), range(31, 40)),
-                    gen_expected(range(11,31))]
-        self.check_expected(sessions, expected)
+        self._nodes_have_proper_ranges_after_repair_and_cleanup(sessions)
 
     @flaky(max_runs=1)
     @pytest.mark.no_vnodes
     def test_replace(self):
         main_session = self.patient_cql_connection(self.node1)
 
-        #We want the node being replaced to have no data on it so the replacement definitely fetches all the data
+        # We want the node being replaced to have no data on it so the replacement definitely fetches all the data
         self.node2.stop(wait_other_notice=True)
 
         for i in range(0, 40):
-            print("Inserting " + str(i))
             self.insert_row(i, i, i, main_session)
 
         replacement_address = self.node2.address()
         self.node2.stop(wait_other_notice=True)
         self.cluster.remove(self.node2)
         self.node2 = Node('replacement', cluster=self.cluster, auto_bootstrap=True,
-                                         thrift_interface=None, storage_interface=(replacement_address, 7000),
-                                         jmx_port='7400', remote_debug_port='0', initial_token=None, binary_interface=(replacement_address, 9042))
+                          thrift_interface=None, storage_interface=(replacement_address, 7000),
+                          jmx_port='7400', remote_debug_port='0', initial_token=None, binary_interface=(replacement_address, 9042))
         patch_start(self.node2)
         nodes = [self.node1, self.node2, self.node3]
         self.cluster.add(self.node2, False, data_center='datacenter1')
@@ -472,20 +459,21 @@ class TestTransientReplicationRing(Tester):
 
         sessions = [self.exclusive_cql_connection(node) for node in [self.node1, self.node2, self.node3]]
 
-        # Everyone should have everything
-        expected = [gen_expected(range(0, 40)),
-                    gen_expected(range(0, 40)),
-                    gen_expected(range(0,40))]
-
-        self.check_replication(sessions, exactly=3)
-        self.check_expected(sessions, expected)
+        self._everyone_should_have_everything(sessions)
 
         repair_nodes(nodes)
         cleanup_nodes(nodes)
 
-        self.check_replication(sessions, exactly=2)
+        self._nodes_have_proper_ranges_after_repair_and_cleanup(sessions)
 
-        expected = [gen_expected(range(0,11), range(21,40)),
-                    gen_expected(range(0,21), range(31, 40)),
-                    gen_expected(range(11,31))]
-        self.check_expected(sessions, expected)
\ No newline at end of file
+    def _everyone_should_have_everything(self, sessions):
+        expected = [gen_expected(range(0, 40))] * 3
+        self.check_replication(sessions, exactly=3)
+        self.check_expected(sessions, expected)
+
+    def _nodes_have_proper_ranges_after_repair_and_cleanup(self, sessions):
+        expected = [gen_expected(range(0, 11), range(21, 40)),
+                    gen_expected(range(0, 21), range(31, 40)),
+                    gen_expected(range(11, 31))]
+        self.check_replication(sessions, exactly=2)
+        self.check_expected(sessions, expected)
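
Among the clean-ups above, check_expected() drops its list-comprehension default argument in
favour of the usual None sentinel. Python evaluates a default value once, at function definition
time, so a mutable default is shared across calls; linters commonly flag the pattern for that
reason. A minimal sketch of the difference (illustrative names only, not dtest code):

    def append_shared(item, acc=[]):
        # The same list object persists between calls.
        acc.append(item)
        return acc


    def append_fresh(item, acc=None):
        # None sentinel: a new list is built on every call.
        if acc is None:
            acc = []
        acc.append(item)
        return acc


    print(append_shared(1), append_shared(2))  # [1, 2] [1, 2] -- surprising
    print(append_fresh(1), append_fresh(2))    # [1] [2]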


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@cassandra.apache.org
For additional commands, e-mail: commits-help@cassandra.apache.org