Posted to commits@cassandra.apache.org by mc...@apache.org on 2020/09/24 09:57:17 UTC

[cassandra-dtest] branch master updated: remove redundant param wait_other_notice=True from Node.start calls

This is an automated email from the ASF dual-hosted git repository.

mck pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/cassandra-dtest.git


The following commit(s) were added to refs/heads/master by this push:
     new ee5dc59  remove redundant param wait_other_notice=True from Node.start calls
ee5dc59 is described below

commit ee5dc59b4cf20be24dd32407369f2ce822914bd7
Author: Christopher Lambert <Ch...@Datastax.com>
AuthorDate: Wed Sep 23 14:25:55 2020 +0200

    remove redundant param wait_other_notice=True from Node.start calls
    
    Since https://github.com/riptano/ccm/pull/464, ccm's Node.start() defaults to
    wait_other_notice=True.
    
    Because passing it explicitly could suggest a non-default value, we clean up
    the code by removing it.
    
    patch by Christopher Lambert; reviewed by Mick Semb Wever
---
 batch_test.py                                |  4 +--
 bootstrap_test.py                            | 22 +++++++-------
 concurrent_schema_changes_test.py            |  2 +-
 consistency_test.py                          | 38 ++++++++++++------------
 consistent_bootstrap_test.py                 |  4 +--
 counter_test.py                              |  4 +--
 fqltool_test.py                              |  2 +-
 hintedhandoff_test.py                        |  4 +--
 legacy_sstables_test.py                      |  2 +-
 materialized_views_test.py                   | 44 ++++++++++++++--------------
 pushed_notifications_test.py                 |  6 ++--
 rebuild_test.py                              |  6 ++--
 repair_tests/incremental_repair_test.py      | 12 ++++----
 repair_tests/preview_repair_test.py          |  4 +--
 repair_tests/repair_test.py                  | 22 +++++++-------
 replace_address_test.py                      |  2 +-
 replica_side_filtering_test.py               |  2 +-
 secondary_indexes_test.py                    |  6 ++--
 seed_test.py                                 |  2 +-
 transient_replication_ring_test.py           | 16 +++++-----
 upgrade_crc_check_chance_test.py             |  2 +-
 upgrade_internal_auth_test.py                |  2 +-
 upgrade_tests/regression_test.py             |  2 +-
 upgrade_tests/repair_test.py                 |  2 +-
 upgrade_tests/storage_engine_upgrade_test.py |  2 +-
 upgrade_tests/thrift_upgrade_test.py         |  2 +-
 upgrade_tests/upgrade_base.py                |  2 +-
 upgrade_tests/upgrade_compact_storage.py     |  2 +-
 upgrade_tests/upgrade_supercolumns_test.py   |  2 +-
 29 files changed, 111 insertions(+), 111 deletions(-)
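
For context, a minimal sketch of the ccm default that the commit message above
relies on. This is an illustration, not the actual ccmlib source: the real
Node.start() takes many more parameters, and only the wait_other_notice default
(True since riptano/ccm#464) is shown.

    # Illustrative sketch only -- not the actual ccmlib source.
    class Node:
        def start(self, wait_other_notice=True, **kwargs):
            """Start the node. With wait_other_notice=True (the default
            since riptano/ccm#464), wait until the other live nodes have
            noticed this node via gossip before returning."""
            ...

    # The two calls below are therefore equivalent, which is why the
    # explicit argument is dropped throughout this patch:
    node = Node()
    node.start(wait_for_binary_proto=True, wait_other_notice=True)
    node.start(wait_for_binary_proto=True)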

diff --git a/batch_test.py b/batch_test.py
index f67a8d7..f10b78a 100644
--- a/batch_test.py
+++ b/batch_test.py
@@ -193,7 +193,7 @@ class TestBatch(Tester):
         """, consistency_level=ConsistencyLevel.ONE)
         session.execute(query)
 
-        self.cluster.nodelist()[-1].start(wait_for_binary_proto=True, wait_other_notice=True)
+        self.cluster.nodelist()[-1].start(wait_for_binary_proto=True)
         assert_all(session, "SELECT * FROM users", [[1, 'Will', 'Turner'], [0, 'Jack', 'Sparrow']],
                    cl=ConsistencyLevel.ALL)
 
@@ -505,4 +505,4 @@ class TestBatch(Tester):
         remove_perf_disable_shared_mem(node)
         # Restart nodes on new version
         logger.debug('Starting {} on new version ({})'.format(node.name, node.get_cassandra_version()))
-        node.start(wait_other_notice=True, wait_for_binary_proto=True, jvm_args=['-Dcassandra.disable_max_protocol_auto_override=true'])
+        node.start(wait_for_binary_proto=True, jvm_args=['-Dcassandra.disable_max_protocol_auto_override=true'])
diff --git a/bootstrap_test.py b/bootstrap_test.py
index 540aa5c..20e2545 100644
--- a/bootstrap_test.py
+++ b/bootstrap_test.py
@@ -212,7 +212,7 @@ class TestBootstrap(Tester):
 
         # Bootstrapping a new node
         node3 = new_node(cluster)
-        node3.start(wait_for_binary_proto=True, wait_other_notice=True)
+        node3.start(wait_for_binary_proto=True)
 
         assert_bootstrap_state(self, node3, 'COMPLETED')
 
@@ -252,7 +252,7 @@ class TestBootstrap(Tester):
              node1 = cluster.nodelist()[0]
 
              logger.debug("Start node 1")
-             node1.start(wait_for_binary_proto=True, wait_other_notice=True)
+             node1.start(wait_for_binary_proto=True)
 
              logger.debug("Insert 10k rows")
              node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8', '-schema', 'replication(factor=2)'])
@@ -260,7 +260,7 @@ class TestBootstrap(Tester):
              logger.debug("Bootstrap node 2 with delay")
              node2 = new_node(cluster, byteman_port='4200')
              node2.update_startup_byteman_script('./byteman/bootstrap_5s_sleep.btm')
-             node2.start(wait_for_binary_proto=True, wait_other_notice=True)
+             node2.start(wait_for_binary_proto=True)
 
              assert_bootstrap_state(self, node2, 'COMPLETED')
              assert node2.grep_log('Bootstrap completed', filename='debug.log')
@@ -561,7 +561,7 @@ class TestBootstrap(Tester):
 
         # Add a new node, bootstrap=True ensures that it is not a seed
         node4 = new_node(cluster, bootstrap=True)
-        node4.start(wait_for_binary_proto=True, wait_other_notice=True)
+        node4.start(wait_for_binary_proto=True)
 
         session = self.patient_cql_connection(node4)
         assert original_rows == list(session.execute("SELECT * FROM {}".format(stress_table,)))
@@ -572,7 +572,7 @@ class TestBootstrap(Tester):
         self._cleanup(node4)
         # Now start it, it should be allowed to join
         mark = node4.mark_log()
-        node4.start(wait_other_notice=True)
+        node4.start()
         node4.watch_log_for("JOINING:", from_mark=mark)
 
     def test_decommissioned_wiped_node_can_gossip_to_single_seed(self):
@@ -589,7 +589,7 @@ class TestBootstrap(Tester):
         node1 = cluster.nodelist()[0]
         # Add a new node, bootstrap=True ensures that it is not a seed
         node2 = new_node(cluster, bootstrap=True)
-        node2.start(wait_for_binary_proto=True, wait_other_notice=True)
+        node2.start(wait_for_binary_proto=True)
 
         session = self.patient_cql_connection(node1)
 
@@ -654,7 +654,7 @@ class TestBootstrap(Tester):
         self._cleanup(node2)
         # Now start it again, it should be allowed to join
         mark = node2.mark_log()
-        node2.start(wait_other_notice=True)
+        node2.start()
         node2.watch_log_for("JOINING:", from_mark=mark)
 
     @since('3.0')
@@ -766,7 +766,7 @@ class TestBootstrap(Tester):
                       '-rate', 'threads=10'])
 
         node2 = new_node(cluster)
-        node2.start(wait_other_notice=True)
+        node2.start()
 
         node3 = new_node(cluster, remote_debug_port='2003')
         try:
@@ -801,7 +801,7 @@ class TestBootstrap(Tester):
             node1.stress(['write', 'n=100k', 'no-warmup', '-schema', 'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)', 'replication(factor=1)', '-rate', 'threads=10'])
             node1.flush()
         node2 = new_node(cluster)
-        node2.start(wait_for_binary_proto=True, wait_other_notice=True)
+        node2.start(wait_for_binary_proto=True)
         event = threading.Event()
         failed = threading.Event()
         jobs = 1
@@ -868,7 +868,7 @@ class TestBootstrap(Tester):
         node2.set_configuration_options(values=config)
         node2.byteman_port = '8101' # set for when we add node3
         node2.import_config_files()
-        node2.start(jvm_args=["-Dcassandra.ring_delay_ms=5000"], wait_other_notice=True)
+        node2.start(jvm_args=["-Dcassandra.ring_delay_ms=5000"])
         self.assert_log_had_msg(node2, 'Some data streaming failed')
 
         if self.cluster.version() >= LooseVersion('4.0'):
@@ -895,7 +895,7 @@ class TestBootstrap(Tester):
         else:
             node1.byteman_submit([self.byteman_submit_path_4_0])
             node2.byteman_submit([self.byteman_submit_path_4_0])
-        node3.start(jvm_args=["-Dcassandra.write_survey=true", "-Dcassandra.ring_delay_ms=5000"], wait_other_notice=True)
+        node3.start(jvm_args=["-Dcassandra.write_survey=true", "-Dcassandra.ring_delay_ms=5000"])
         self.assert_log_had_msg(node3, 'Some data streaming failed')
         self.assert_log_had_msg(node3, "Not starting client transports in write_survey mode as it's bootstrapping or auth is enabled")
 
diff --git a/concurrent_schema_changes_test.py b/concurrent_schema_changes_test.py
index fba75e4..1b9d8e7 100644
--- a/concurrent_schema_changes_test.py
+++ b/concurrent_schema_changes_test.py
@@ -350,7 +350,7 @@ class TestConcurrentSchemaChanges(Tester):
         node2.stop()
         self._do_lots_of_schema_actions(session)
         wait(15)
-        node2.start(wait_other_notice=True)
+        node2.start()
         logger.debug("waiting for things to settle and sync")
         wait(120)
         self._verify_lots_of_schema_actions(session)
diff --git a/consistency_test.py b/consistency_test.py
index af4a84d..1ee183f 100644
--- a/consistency_test.py
+++ b/consistency_test.py
@@ -881,7 +881,7 @@ class TestConsistency(Tester):
 
         # take node2 down, get node3 up
         node2.stop(wait_other_notice=True)
-        node3.start(wait_other_notice=True)
+        node3.start()
 
         # insert an RT somewhere so that we would have a closing marker and RR makes its mutations
         stmt = SimpleStatement("DELETE FROM journals.logs WHERE user = 'beobal' AND year = 2010 AND month = 12 AND day = 30",
@@ -943,7 +943,7 @@ class TestConsistency(Tester):
 
         node1.stop(wait_other_notice=True)
         session.execute('DELETE FROM test.test USING TIMESTAMP 1 WHERE pk = 0 AND ck = 0;')
-        node1.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node1.start(wait_for_binary_proto=True)
 
         # with both nodes up, make a LIMIT 1 read that would trigger a short read protection
         # request, which in turn will trigger the AssertionError in DataResolver (prior to
@@ -983,7 +983,7 @@ class TestConsistency(Tester):
 
         node2.stop(wait_other_notice=True)
         session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 0);')
-        node2.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node2.start(wait_for_binary_proto=True)
 
         # with node1 down, delete row 1 and 2 on node2
         #
@@ -994,7 +994,7 @@ class TestConsistency(Tester):
 
         node1.stop(wait_other_notice=True)
         session.execute('DELETE FROM test.test WHERE pk = 0 AND ck IN (1, 2);')
-        node1.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node1.start(wait_for_binary_proto=True)
 
         # with both nodes up, do a CL.ALL query with per partition limit of 1;
         # prior to CASSANDRA-13911 this would trigger an IllegalStateException
@@ -1039,7 +1039,7 @@ class TestConsistency(Tester):
         session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 1) USING TIMESTAMP 42;')
         session.execute('INSERT INTO test.test (pk, ck) VALUES (2, 0) USING TIMESTAMP 42;')
         session.execute('DELETE FROM test.test USING TIMESTAMP 42 WHERE pk = 2 AND ck = 1;')
-        node2.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node2.start(wait_for_binary_proto=True)
 
         # with node1 down
         #
@@ -1054,7 +1054,7 @@ class TestConsistency(Tester):
         session.execute('DELETE FROM test.test USING TIMESTAMP 42 WHERE pk = 2 AND ck = 0;')
         session.execute('INSERT INTO test.test (pk, ck) VALUES (2, 1) USING TIMESTAMP 42;')
         session.execute('INSERT INTO test.test (pk, ck) VALUES (2, 2) USING TIMESTAMP 42;')
-        node1.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node1.start(wait_for_binary_proto=True)
 
         # with both nodes up, do a CL.ALL query with per partition limit of 2 and limit of 3;
         # without the change to if (!singleResultCounter.isDoneForPartition()) branch,
@@ -1101,7 +1101,7 @@ class TestConsistency(Tester):
         session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 0) USING TIMESTAMP 42;')
         session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 1) USING TIMESTAMP 42;')
         session.execute('DELETE FROM test.test USING TIMESTAMP 42 WHERE pk = 2 AND ck IN  (0, 1);')
-        node2.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node2.start(wait_for_binary_proto=True)
 
         # with node1 down
         #
@@ -1118,7 +1118,7 @@ class TestConsistency(Tester):
         session.execute('INSERT INTO test.test (pk, ck) VALUES (2, 1) USING TIMESTAMP 42;')
         session.execute('INSERT INTO test.test (pk, ck) VALUES (4, 0) USING TIMESTAMP 42;')
         session.execute('INSERT INTO test.test (pk, ck) VALUES (4, 1) USING TIMESTAMP 42;')
-        node1.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node1.start(wait_for_binary_proto=True)
 
         # with both nodes up, do a CL.ALL query with per partition limit of 2 and limit of 4;
         # without the extra condition in if (!singleResultCounter.isDone()) branch,
@@ -1159,7 +1159,7 @@ class TestConsistency(Tester):
         # with node2 down and hints disabled, delete the partition on node1
         node2.stop(wait_other_notice=True)
         session.execute("DELETE FROM test.test WHERE id = 0;")
-        node2.start(wait_other_notice=True)
+        node2.start()
 
         # with both nodes up, do a CL.ALL query with per partition limit of 1;
         # prior to CASSANDRA-13880 this would cause short read protection to loop forever
@@ -1212,7 +1212,7 @@ class TestConsistency(Tester):
 
         # with both nodes up, do a DISTINCT range query with CL.ALL;
         # prior to CASSANDRA-13747 this would cause an assertion in short read protection code
-        node2.start(wait_other_notice=True)
+        node2.start()
         stmt = SimpleStatement("SELECT DISTINCT token(id), id FROM test.test;",
                                consistency_level=ConsistencyLevel.ALL)
         result = list(session.execute(stmt))
@@ -1265,14 +1265,14 @@ class TestConsistency(Tester):
         # delete every other partition on node1 while node2 is down
         node2.stop(wait_other_notice=True)
         session.execute('DELETE FROM test.test WHERE id IN (5, 8, 2, 7, 9);')
-        node2.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node2.start(wait_for_binary_proto=True)
 
         session = self.patient_cql_connection(node2)
 
         # delete every other alternate partition on node2 while node1 is down
         node1.stop(wait_other_notice=True)
         session.execute('DELETE FROM test.test WHERE id IN (1, 0, 4, 6);')
-        node1.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node1.start(wait_for_binary_proto=True)
 
         session = self.patient_exclusive_cql_connection(node1)
 
@@ -1331,7 +1331,7 @@ class TestConsistency(Tester):
         # node1 |   up | 0 x x x   5
         # node2 | down | 0 1 2 3
 
-        node2.start(wait_other_notice=True)
+        node2.start()
         node1.stop(wait_other_notice=True)
 
         # node1 | down | 0 x x x   5
@@ -1344,7 +1344,7 @@ class TestConsistency(Tester):
         # node1 | down | 0 x x x   5
         # node2 |   up | 0 1 2 3 4
 
-        node1.start(wait_other_notice=True)
+        node1.start()
 
         # node1 |   up | 0 x x x   5
         # node2 |   up | 0 1 2 3 4
@@ -1453,7 +1453,7 @@ class TestConsistency(Tester):
         query = SimpleStatement('DELETE FROM cf WHERE key=\'k0\'', consistency_level=ConsistencyLevel.ONE)
         session.execute(query)
 
-        node1.start(wait_other_notice=True)
+        node1.start()
 
         # Query first column
         session = self.patient_cql_connection(node1, 'ks')
@@ -1491,14 +1491,14 @@ class TestConsistency(Tester):
         node2.flush()
         node2.stop(wait_other_notice=True)
         session.execute(SimpleStatement("DELETE FROM t WHERE id = 0 AND v = 1", consistency_level=ConsistencyLevel.QUORUM))
-        node2.start(wait_other_notice=True)
+        node2.start()
 
         # we delete 2: only B and C get it.
         node1.flush()
         node1.stop(wait_other_notice=True)
         session = self.patient_cql_connection(node2, 'ks')
         session.execute(SimpleStatement("DELETE FROM t WHERE id = 0 AND v = 2", consistency_level=ConsistencyLevel.QUORUM))
-        node1.start(wait_other_notice=True)
+        node1.start()
         session = self.patient_cql_connection(node1, 'ks')
 
         # we read the first row in the partition (so with a LIMIT 1) and A and B answer first.
@@ -1528,7 +1528,7 @@ class TestConsistency(Tester):
 
         insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ONE)
 
-        node2.start(wait_for_binary_proto=True, wait_other_notice=True)
+        node2.start(wait_for_binary_proto=True)
 
         # query everything to cause RR
         for n in range(0, 10000):
@@ -1588,4 +1588,4 @@ class TestConsistency(Tester):
 
     def restart_node(self, node_number):
         stopped_node = self.cluster.nodes["node%d" % node_number]
-        stopped_node.start(wait_for_binary_proto=True, wait_other_notice=True)
+        stopped_node.start(wait_for_binary_proto=True)
diff --git a/consistent_bootstrap_test.py b/consistent_bootstrap_test.py
index 073e8cd..fe7fccc 100644
--- a/consistent_bootstrap_test.py
+++ b/consistent_bootstrap_test.py
@@ -42,7 +42,7 @@ class TestBootstrapConsistency(Tester):
         node2.flush()
 
         logger.debug("Restart node1")
-        node1.start(wait_other_notice=True)
+        node1.start()
 
         logger.debug("Move token on node3")
         node3.move(2)
@@ -84,7 +84,7 @@ class TestBootstrapConsistency(Tester):
         node2.flush()
 
         logger.debug("Restart node1")
-        node1.start(wait_other_notice=True)
+        node1.start()
 
         logger.debug("Bootstraping node3")
         node3 = new_node(cluster)
diff --git a/counter_test.py b/counter_test.py
index 2254038..54257fd 100644
--- a/counter_test.py
+++ b/counter_test.py
@@ -69,7 +69,7 @@ class TestCounters(Tester):
 
         node3.stop(wait_other_notice=True)
         node3.set_install_dir(install_dir=default_install_dir)
-        node3.start(wait_other_notice=True)
+        node3.start()
 
         #
         # with a 2.1 coordinator, try to read the table with CL.ALL
@@ -205,7 +205,7 @@ class TestCounters(Tester):
                 time.sleep(.2)
                 nodes[i].nodetool("drain")
                 nodes[i].stop(wait_other_notice=False)
-                nodes[i].start(wait_other_notice=True, wait_for_binary_proto=True)
+                nodes[i].start(wait_for_binary_proto=True)
                 time.sleep(.2)
 
         make_updates()
diff --git a/fqltool_test.py b/fqltool_test.py
index 1fa6bc0..9782650 100644
--- a/fqltool_test.py
+++ b/fqltool_test.py
@@ -42,7 +42,7 @@ class TestFQLTool(Tester):
             node2.clear()
 
             node1.start(wait_for_binary_proto=True)
-            node2.start(wait_for_binary_proto=True, wait_other_notice=True)
+            node2.start(wait_for_binary_proto=True)
             # make sure the node is empty:
             got_exception = False
             try:
diff --git a/hintedhandoff_test.py b/hintedhandoff_test.py
index d0c1709..dc5d64c 100644
--- a/hintedhandoff_test.py
+++ b/hintedhandoff_test.py
@@ -60,7 +60,7 @@ class TestHintedHandoffConfig(Tester):
         insert_c1c2(session, n=100, consistency=ConsistencyLevel.ONE)
 
         log_mark = node1.mark_log()
-        node2.start(wait_other_notice=True)
+        node2.start()
 
         if enabled:
             node1.watch_log_for(["Finished hinted"], from_mark=log_mark, timeout=120)
@@ -139,7 +139,7 @@ class TestHintedHandoffConfig(Tester):
         res = self._launch_nodetool_cmd(node, 'getmaxhintwindow')
         assert 'Current max hint window: 300000 ms' == res.rstrip()
         self._do_hinted_handoff(node1, node2, True)
-        node1.start(wait_other_notice=True)
+        node1.start()
         self._launch_nodetool_cmd(node, 'setmaxhintwindow 1')
         res = self._launch_nodetool_cmd(node, 'getmaxhintwindow')
         assert 'Current max hint window: 1 ms' == res.rstrip()
diff --git a/legacy_sstables_test.py b/legacy_sstables_test.py
index 8dc15e9..c8c400e 100644
--- a/legacy_sstables_test.py
+++ b/legacy_sstables_test.py
@@ -47,7 +47,7 @@ class TestLegacySSTables(Tester):
         # stop, upgrade to current version (3.0 or 3.11), start up
         node1.stop(wait_other_notice=True)
         self.set_node_to_current_version(node1)
-        node1.start(wait_other_notice=True)
+        node1.start()
         session = self.patient_cql_connection(node1)
 
         # make sure all 4 rows are there when reading backwards
diff --git a/materialized_views_test.py b/materialized_views_test.py
index 7305015..60b49a5 100644
--- a/materialized_views_test.py
+++ b/materialized_views_test.py
@@ -534,11 +534,11 @@ class TestMaterializedViews(Tester):
 
         logger.debug("Bootstrapping new node in another dc")
         node4 = new_node(self.cluster, data_center='dc2')
-        node4.start(wait_other_notice=True, wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
+        node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
 
         logger.debug("Bootstrapping new node in another dc")
         node5 = new_node(self.cluster, remote_debug_port='1414', data_center='dc2')
-        node5.start(jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)], wait_other_notice=True, wait_for_binary_proto=True)
+        node5.start(jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)], wait_for_binary_proto=True)
         if nts:
             session.execute("alter keyspace ks with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}")
             session.execute("alter keyspace system_auth with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}")
@@ -1604,9 +1604,9 @@ class TestMaterializedViews(Tester):
         self.update_view(session, query, flush)
 
         logger.debug('Starting node2')
-        node2.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node2.start(wait_for_binary_proto=True)
         logger.debug('Starting node3')
-        node3.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node3.start(wait_for_binary_proto=True)
 
         # For k = 1 & a = 1, We should get a digest mismatch of tombstones and repaired
         query = SimpleStatement("SELECT * FROM mv WHERE k = 1 AND a = 1", consistency_level=ConsistencyLevel.ALL)
@@ -1820,7 +1820,7 @@ class TestMaterializedViews(Tester):
             [1, 1, 'b', 3.0]
         )
 
-        node2.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node2.start(wait_for_binary_proto=True)
 
         # We should get a digest mismatch
         query = SimpleStatement("SELECT * FROM t_by_v WHERE v = 1",
@@ -1912,7 +1912,7 @@ class TestMaterializedViews(Tester):
             )
 
         logger.debug('Start node2, and repair')
-        node2.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node2.start(wait_for_binary_proto=True)
         if repair_base:
             node1.nodetool("repair ks t")
         if repair_view:
@@ -1982,7 +1982,7 @@ class TestMaterializedViews(Tester):
             # this should not make Keyspace.apply throw WTE on failure to acquire lock
             node1.set_configuration_options(values={'write_request_timeout_in_ms': 100})
         logger.debug('Restarting node1 with jvm_args={}'.format(jvm_args))
-        node1.start(wait_other_notice=True, wait_for_binary_proto=True, jvm_args=jvm_args)
+        node1.start(wait_for_binary_proto=True, jvm_args=jvm_args)
         logger.debug('Shutdown node2 and node3')
         node2.stop(wait_other_notice=True)
         node3.stop(wait_other_notice=True)
@@ -1998,8 +1998,8 @@ class TestMaterializedViews(Tester):
             )
 
         logger.debug('Restarting node2 and node3')
-        node2.start(wait_other_notice=True, wait_for_binary_proto=True)
-        node3.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node2.start(wait_for_binary_proto=True)
+        node3.start(wait_for_binary_proto=True)
 
         # Just repair the base replica
         logger.debug('Starting repair on node1')
@@ -2054,7 +2054,7 @@ class TestMaterializedViews(Tester):
 
         logger.debug('Start nodes 2 and 3')
         node2.start()
-        node3.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node3.start(wait_for_binary_proto=True)
 
         session2 = self.patient_cql_connection(node2)
 
@@ -2083,9 +2083,9 @@ class TestMaterializedViews(Tester):
         time.sleep(5)
 
         logger.debug('Start remaining nodes')
-        node1.start(wait_other_notice=True, wait_for_binary_proto=True)
-        node4.start(wait_other_notice=True, wait_for_binary_proto=True)
-        node5.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node1.start(wait_for_binary_proto=True)
+        node4.start(wait_for_binary_proto=True)
+        node5.start(wait_for_binary_proto=True)
 
         session = self.patient_cql_connection(node1)
 
@@ -2180,10 +2180,10 @@ class TestMaterializedViews(Tester):
 
         # start nodes with different batch size
         logger.debug('Starting nodes')
-        node2.start(wait_other_notice=True, wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(2)])
-        node3.start(wait_other_notice=True, wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(5)])
-        node4.start(wait_other_notice=True, wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(50)])
-        node5.start(wait_other_notice=True, wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(5000)])
+        node2.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(2)])
+        node3.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(5)])
+        node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(50)])
+        node5.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(5000)])
         self._replay_batchlogs()
 
         logger.debug('repairing base table')
@@ -2199,7 +2199,7 @@ class TestMaterializedViews(Tester):
         logger.debug('rolling restart to check repaired data on each node')
         for node in self.cluster.nodelist():
             logger.debug('starting {}'.format(node.name))
-            node.start(wait_other_notice=True, wait_for_binary_proto=True)
+            node.start(wait_for_binary_proto=True)
             session = self.patient_cql_connection(node, consistency_level=ConsistencyLevel.ONE)
             for ck1 in range(size):
                 for ck2 in range(size):
@@ -2270,7 +2270,7 @@ class TestMaterializedViews(Tester):
 
         logger.debug('Start nodes 2 and 3')
         node2.start()
-        node3.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node3.start(wait_for_binary_proto=True)
 
         session2 = self.patient_cql_connection(node2)
         session2.execute('USE ks')
@@ -2300,9 +2300,9 @@ class TestMaterializedViews(Tester):
         time.sleep(5)
 
         logger.debug('Start remaining nodes')
-        node1.start(wait_other_notice=True, wait_for_binary_proto=True)
-        node4.start(wait_other_notice=True, wait_for_binary_proto=True)
-        node5.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node1.start(wait_for_binary_proto=True)
+        node4.start(wait_for_binary_proto=True)
+        node5.start(wait_for_binary_proto=True)
 
         # at this point the data isn't repaired so we have an inconsistency.
         # this value should return None
diff --git a/pushed_notifications_test.py b/pushed_notifications_test.py
index b67c626..6ee6d28 100644
--- a/pushed_notifications_test.py
+++ b/pushed_notifications_test.py
@@ -191,7 +191,7 @@ class TestPushedNotifications(Tester):
         for i in range(5):
             logger.debug("Restarting second node...")
             node2.stop(wait_other_notice=True)
-            node2.start(wait_other_notice=True)
+            node2.start()
             logger.debug("Waiting for notifications from {}".format(waiter.address))
             notifications = waiter.wait_for_notifications(timeout=60.0, num_notifications=expected_notifications)
             assert expected_notifications, len(notifications) == notifications
@@ -237,7 +237,7 @@ class TestPushedNotifications(Tester):
 
         logger.debug("Restarting second node...")
         node2.stop(wait_other_notice=True)
-        node2.start(wait_other_notice=True)
+        node2.start()
 
         # check that node1 did not send UP or DOWN notification for node2
         logger.debug("Waiting for notifications from {}".format(waiter.address,))
@@ -279,7 +279,7 @@ class TestPushedNotifications(Tester):
         logger.debug("Adding second node...")
         node2 = Node('node2', self.cluster, True, None, ('127.0.0.2', 7000), '7200', '0', None, binary_interface=('127.0.0.2', 9042))
         self.cluster.add(node2, False)
-        node2.start(wait_other_notice=True)
+        node2.start()
         logger.debug("Waiting for notifications from {}".format(waiter.address))
         notifications = waiter.wait_for_notifications(timeout=60.0, num_notifications=2)
         assert 2 == len(notifications), notifications
diff --git a/rebuild_test.py b/rebuild_test.py
index 91bbfdd..8e662f0 100644
--- a/rebuild_test.py
+++ b/rebuild_test.py
@@ -69,7 +69,7 @@ class TestRebuild(Tester):
                                     '7200', '2001', None,
                                     binary_interface=('127.0.0.2', 9042))
         cluster.add(node2, False, data_center='dc2')
-        node2.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node2.start(wait_for_binary_proto=True)
 
         # wait for snitch to reload
         time.sleep(60)
@@ -284,7 +284,7 @@ class TestRebuild(Tester):
                                     binary_interface=('127.0.0.2', 9042))
         node2.set_configuration_options(values={'initial_token': tokens[1]})
         cluster.add(node2, False, data_center='dc2')
-        node2.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node2.start(wait_for_binary_proto=True)
 
         # wait for snitch to reload
         time.sleep(60)
@@ -422,7 +422,7 @@ class TestRebuild(Tester):
                                     '7300', '2002', tokens[2],
                                     binary_interface=('127.0.0.3', 9042))
         cluster.add(node3, False, data_center='dc3')
-        node3.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node3.start(wait_for_binary_proto=True)
 
         # wait for snitch to reload
         time.sleep(60)
diff --git a/repair_tests/incremental_repair_test.py b/repair_tests/incremental_repair_test.py
index 4920fc1..9cd8c75 100644
--- a/repair_tests/incremental_repair_test.py
+++ b/repair_tests/incremental_repair_test.py
@@ -119,11 +119,11 @@ class TestIncRepair(Tester):
         node1.flush()
         time.sleep(1)
         node1.stop(gently=False)
-        node3.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node3.start(wait_for_binary_proto=True)
         session = self.exclusive_cql_connection(node2)
         for i in range(10):
             session.execute(stmt, (i + 20, i + 20))
-        node1.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node1.start(wait_for_binary_proto=True)
 
         # flush and check that no sstables are marked repaired
         for node in self.cluster.nodelist():
@@ -346,7 +346,7 @@ class TestIncRepair(Tester):
         node1.flush()
         node2.flush()
 
-        node3.start(wait_other_notice=True)
+        node3.start()
         if node3.get_cassandra_version() < '2.2':
             log_file = 'system.log'
         else:
@@ -443,7 +443,7 @@ class TestIncRepair(Tester):
         node3.stop(gently=False)
         node5 = Node('node5', cluster, True, ('127.0.0.5', 9160), ('127.0.0.5', 7000), '7500', '0', None, ('127.0.0.5', 9042))
         cluster.add(node5, False)
-        node5.start(replace_address='127.0.0.3', wait_other_notice=True)
+        node5.start(replace_address='127.0.0.3')
 
         assert_one(session, "SELECT COUNT(*) FROM ks.cf LIMIT 200", [149])
 
@@ -956,7 +956,7 @@ class TestIncRepair(Tester):
         session.execute("delete from ks.tbl where k = 5")
 
         node1.flush()
-        node2.start(wait_other_notice=True)
+        node2.start()
 
         # expect unconfirmed inconsistencies as the partition deletes cause some sstables to be skipped
         with JolokiaAgent(node1) as jmx:
@@ -1005,7 +1005,7 @@ class TestIncRepair(Tester):
         node1.flush()
         node1.compact()
         node1.compact()
-        node2.start(wait_other_notice=True)
+        node2.start()
 
         # we don't expect any inconsistencies as all repaired data is read on both replicas
         with JolokiaAgent(node1) as jmx:
diff --git a/repair_tests/preview_repair_test.py b/repair_tests/preview_repair_test.py
index ee5a38d..9cd2d40 100644
--- a/repair_tests/preview_repair_test.py
+++ b/repair_tests/preview_repair_test.py
@@ -51,11 +51,11 @@ class TestPreviewRepair(Tester):
         node1.flush()
         time.sleep(1)
         node1.stop(gently=False)
-        node3.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node3.start(wait_for_binary_proto=True)
         session = self.exclusive_cql_connection(node2)
         for i in range(10):
             session.execute(stmt, (i + 20, i + 20))
-        node1.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node1.start(wait_for_binary_proto=True)
 
         # data should not be in sync for full and unrepaired previews
         result = node1.repair(options=['ks', '--preview'])
diff --git a/repair_tests/repair_test.py b/repair_tests/repair_test.py
index 7d1949a..07d4576 100644
--- a/repair_tests/repair_test.py
+++ b/repair_tests/repair_test.py
@@ -85,7 +85,7 @@ class BaseRepairTest(Tester):
 
         if restart:
             for node in stopped_nodes:
-                node.start(wait_for_binary_proto=True, wait_other_notice=True)
+                node.start(wait_for_binary_proto=True)
 
     def _populate_cluster(self, start=True):
         cluster = self.cluster
@@ -111,7 +111,7 @@ class BaseRepairTest(Tester):
         node3.flush()
         node3.stop(wait_other_notice=True)
         insert_c1c2(session, keys=(1000, ), consistency=ConsistencyLevel.TWO)
-        node3.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node3.start(wait_for_binary_proto=True)
         insert_c1c2(session, keys=list(range(1001, 2001)), consistency=ConsistencyLevel.ALL)
 
         cluster.flush()
@@ -319,7 +319,7 @@ class TestRepair(BaseRepairTest):
         # stop node2, stress and start full repair to find out how synced ranges affect repairedAt values
         node2.stop(wait_other_notice=True)
         node1.stress(stress_options=['write', 'n=40K', 'no-warmup', 'cl=ONE', '-rate', 'threads=50'])
-        node2.start(wait_for_binary_proto=True, wait_other_notice=True)
+        node2.start(wait_for_binary_proto=True)
         node1.nodetool("repair -full -pr keyspace1 standard1")
 
         meta = self._get_repaired_data(node1, 'keyspace1')
@@ -509,7 +509,7 @@ class TestRepair(BaseRepairTest):
         time.sleep(2)
 
         # bring up node2 and repair
-        node2.start(wait_for_binary_proto=True, wait_other_notice=True)
+        node2.start(wait_for_binary_proto=True)
         node2.repair(_repair_options(self.cluster.version(), ks='ks', sequential=sequential))
 
         # check no rows will be returned
@@ -726,7 +726,7 @@ class TestRepair(BaseRepairTest):
         node2.flush()
         node2.stop(wait_other_notice=True)
         insert_c1c2(session, keys=(1000, ), consistency=ConsistencyLevel.THREE)
-        node2.start(wait_for_binary_proto=True, wait_other_notice=True)
+        node2.start(wait_for_binary_proto=True)
         node1.watch_log_for_alive(node2)
         insert_c1c2(session, keys=list(range(1001, 2001)), consistency=ConsistencyLevel.ALL)
 
@@ -829,7 +829,7 @@ class TestRepair(BaseRepairTest):
         node2.flush()
         node2.stop(wait_other_notice=True)
         node1.stress(['write', 'n=1K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq=20..40K'])
-        node2.start(wait_for_binary_proto=True, wait_other_notice=True)
+        node2.start(wait_for_binary_proto=True)
         node1.stress(['write', 'n=1K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq=40..60K'])
         cluster.flush()
 
@@ -931,7 +931,7 @@ class TestRepair(BaseRepairTest):
         node2.stop(wait_other_notice=True)
 
         node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq=20..40K'])
-        node2.start(wait_for_binary_proto=True, wait_other_notice=True)
+        node2.start(wait_for_binary_proto=True)
 
         node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq=40..60K'])
 
@@ -977,7 +977,7 @@ class TestRepair(BaseRepairTest):
         node2.stop(wait_other_notice=True)
 
         node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq=20..40K'])
-        node2.start(wait_for_binary_proto=True, wait_other_notice=True)
+        node2.start(wait_for_binary_proto=True)
 
         cluster.flush()
 
@@ -1024,7 +1024,7 @@ class TestRepair(BaseRepairTest):
 
             node1.stress(['write', 'n=2K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate',
                           'threads=30', '-pop', 'seq={}..{}K'.format(2 * (job_thread_count), 2 * (job_thread_count + 1))])
-            node2.start(wait_for_binary_proto=True, wait_other_notice=True)
+            node2.start(wait_for_binary_proto=True)
 
             cluster.flush()
             session = self.patient_cql_connection(node1)
@@ -1120,7 +1120,7 @@ class TestRepair(BaseRepairTest):
         node1.stop(gently=False, wait_other_notice=True)
         t1.join()
         logger.debug("starting node1 - first repair should have failed")
-        node1.start(wait_for_binary_proto=True, wait_other_notice=True)
+        node1.start(wait_for_binary_proto=True)
         logger.debug("running second repair")
         if cluster.version() >= "2.2":
             node1.repair()
@@ -1207,7 +1207,7 @@ class TestRepair(BaseRepairTest):
                                      '-rate', 'threads=10'])
 
         logger.debug("bring back node3")
-        node3.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node3.start(wait_for_binary_proto=True)
 
         if phase == 'sync':
             script = 'stream_sleep.btm'
diff --git a/replace_address_test.py b/replace_address_test.py
index 18b01a3..18a94ae 100644
--- a/replace_address_test.py
+++ b/replace_address_test.py
@@ -215,7 +215,7 @@ class BaseReplaceAddressTest(Tester):
                 if node.is_running() and node != self.query_node:
                     logger.debug("Upgrading {} to current version".format(node.address()))
                     node.stop(gently=True, wait_other_notice=True)
-                    node.start(wait_other_notice=True, wait_for_binary_proto=True)
+                    node.start(wait_for_binary_proto=True)
 
         # start node in current version on write survey mode
         self._do_replace(same_address=same_address, extra_jvm_args=["-Dcassandra.write_survey=true"])
diff --git a/replica_side_filtering_test.py b/replica_side_filtering_test.py
index c38fcfc..b33e401 100644
--- a/replica_side_filtering_test.py
+++ b/replica_side_filtering_test.py
@@ -70,7 +70,7 @@ class ReplicaSideFiltering(Tester):
         session = self.patient_cql_connection(node_to_update, keyspace, consistency_level=CL.ONE)
         for q in queries:
             session.execute(q)
-        node_to_stop.start(wait_other_notice=True)
+        node_to_stop.start()
 
     def _assert_none(self, query):
         """
diff --git a/secondary_indexes_test.py b/secondary_indexes_test.py
index 7b9459b..d32f9e3 100644
--- a/secondary_indexes_test.py
+++ b/secondary_indexes_test.py
@@ -1121,7 +1121,7 @@ class TestUpgradeSecondaryIndexes(Tester):
         node1.stop(wait_other_notice=False)
         logger.debug("Upgrading to current version")
         self.set_node_to_current_version(node1)
-        node1.start(wait_other_notice=True)
+        node1.start()
 
         [node1] = cluster.nodelist()
         session = self.patient_cql_connection(node1)
@@ -1150,7 +1150,7 @@ class TestUpgradeSecondaryIndexes(Tester):
             logger.debug('Starting %s on new version (%s)' % (node.name, tag))
             # Setup log4j / logback again (necessary moving from 2.0 -> 2.1):
             node.set_log_level("INFO")
-            node.start(wait_other_notice=True)
+            node.start()
             # node.nodetool('upgradesstables -a')
 
 
@@ -1223,7 +1223,7 @@ class TestPreJoinCallback(Tester):
                 yaml_opts['streaming_socket_timeout_in_ms'] = 1000
 
             node2.set_configuration_options(values=yaml_opts)
-            node2.start(wait_other_notice=True, wait_for_binary_proto=False)
+            node2.start(wait_for_binary_proto=False)
             node2.watch_log_for('Some data streaming failed. Use nodetool to check bootstrap state and resume.')
 
             node2.nodetool("bootstrap resume")
diff --git a/seed_test.py b/seed_test.py
index 1f61504..494661c 100644
--- a/seed_test.py
+++ b/seed_test.py
@@ -90,5 +90,5 @@ class TestGossiper(Tester):
         node2.watch_log_for('Starting shadow gossip round to check for endpoint collision', filename='debug.log')
         sleep(RING_DELAY / 1000)
         # Start seed, ensure node2 joins before it exits shadow round.
-        node1.start(wait_other_notice=True, wait_for_binary_proto=120)
+        node1.start(wait_for_binary_proto=120)
         self.assert_log_had_msg(node2, 'Starting listening for CQL clients', timeout=60)
diff --git a/transient_replication_ring_test.py b/transient_replication_ring_test.py
index 8a59fd1..e9ccfa1 100644
--- a/transient_replication_ring_test.py
+++ b/transient_replication_ring_test.py
@@ -173,7 +173,7 @@ class TestTransientReplicationRing(Tester):
         for i in range(1, 40, 2):
             self.insert_row(i, i, i, main_session)
 
-        nodes[1].start(wait_for_binary_proto=True, wait_other_notice=True)
+        nodes[1].start(wait_for_binary_proto=True)
 
         sessions = [self.exclusive_cql_connection(node) for node in [self.node1, self.node2, self.node3]]
 
@@ -187,7 +187,7 @@ class TestTransientReplicationRing(Tester):
         node4 = new_node(self.cluster, bootstrap=True, token='00040')
         patch_start(node4)
         nodes.append(node4)
-        node4.start(wait_for_binary_proto=True, wait_other_notice=True)
+        node4.start(wait_for_binary_proto=True)
 
         expected.append(gen_expected(range(11, 20, 2), range(21, 40)))
         sessions.append(self.exclusive_cql_connection(node4))
@@ -227,7 +227,7 @@ class TestTransientReplicationRing(Tester):
         """Helper method to run a move test cycle"""
         node4 = new_node(self.cluster, bootstrap=True, token='00040')
         patch_start(node4)
-        node4.start(wait_for_binary_proto=True, wait_other_notice=True)
+        node4.start(wait_for_binary_proto=True)
         main_session = self.patient_cql_connection(self.node1)
         nodes = [self.node1, self.node2, self.node3, node4]
 
@@ -246,7 +246,7 @@ class TestTransientReplicationRing(Tester):
             print("Inserting " + str(i))
             self.insert_row(i, i, i, main_session)
 
-        nodes[1].start(wait_for_binary_proto=True, wait_other_notice=True)
+        nodes[1].start(wait_for_binary_proto=True)
         sessions = [self.exclusive_cql_connection(node) for node in [self.node1, self.node2, self.node3, node4]]
 
         expected = [gen_expected(range(0, 11), range(31, 40)),
@@ -338,7 +338,7 @@ class TestTransientReplicationRing(Tester):
         """Test decommissioning a node correctly streams out all the data"""
         node4 = new_node(self.cluster, bootstrap=True, token='00040')
         patch_start(node4)
-        node4.start(wait_for_binary_proto=True, wait_other_notice=True)
+        node4.start(wait_for_binary_proto=True)
         main_session = self.patient_cql_connection(self.node1)
         nodes = [self.node1, self.node2, self.node3, node4]
 
@@ -357,7 +357,7 @@ class TestTransientReplicationRing(Tester):
             print("Inserting " + str(i))
             self.insert_row(i, i, i, main_session)
 
-        nodes[1].start(wait_for_binary_proto=True, wait_other_notice=True)
+        nodes[1].start(wait_for_binary_proto=True)
         sessions = [self.exclusive_cql_connection(node) for node in [self.node1, self.node2, self.node3, node4]]
 
         expected = [gen_expected(range(0, 11), range(31, 40)),
@@ -399,7 +399,7 @@ class TestTransientReplicationRing(Tester):
         """Test  a mix of ring change operations across a mix of transient and repaired/unrepaired data"""
         node4 = new_node(self.cluster, bootstrap=True, token='00040')
         patch_start(node4)
-        node4.start(wait_for_binary_proto=True, wait_other_notice=True)
+        node4.start(wait_for_binary_proto=True)
         main_session = self.patient_cql_connection(self.node1)
         nodes = [self.node1, self.node2, self.node3]
 
@@ -468,7 +468,7 @@ class TestTransientReplicationRing(Tester):
         jvm_args = ["-Dcassandra.replace_address=%s" % replacement_address,
                     "-Dcassandra.ring_delay_ms=10000",
                     "-Dcassandra.broadcast_interval_ms=10000"]
-        self.node2.start(jvm_args=jvm_args, wait_for_binary_proto=True, wait_other_notice=True)
+        self.node2.start(jvm_args=jvm_args, wait_for_binary_proto=True)
 
         sessions = [self.exclusive_cql_connection(node) for node in [self.node1, self.node2, self.node3]]
 
diff --git a/upgrade_crc_check_chance_test.py b/upgrade_crc_check_chance_test.py
index 3ad1b59..9347f17 100644
--- a/upgrade_crc_check_chance_test.py
+++ b/upgrade_crc_check_chance_test.py
@@ -136,7 +136,7 @@ class TestCrcCheckChanceUpgrade(Tester):
         logger.debug('Starting {node} on new version ({tag})'.format(**format_args))
         # Setup log4j / logback again (necessary moving from 2.0 -> 2.1):
         node.set_log_level("INFO")
-        node.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node.start(wait_for_binary_proto=True)
 
         logger.debug('Running upgradesstables')
         node.nodetool('upgradesstables -a')
diff --git a/upgrade_internal_auth_test.py b/upgrade_internal_auth_test.py
index b86c00e..ffc4cab 100644
--- a/upgrade_internal_auth_test.py
+++ b/upgrade_internal_auth_test.py
@@ -222,7 +222,7 @@ class TestAuthUpgrade(Tester):
         logger.debug('Starting {node} on new version ({tag})'.format(**format_args))
         # Setup log4j / logback again (necessary moving from 2.0 -> 2.1):
         node.set_log_level("INFO")
-        node.start(wait_other_notice=True)
+        node.start()
         # wait for the conversion of legacy data to either complete or fail
         # (because not enough upgraded nodes are available yet)
         logger.debug('Waiting for conversion of legacy data to complete or fail')
diff --git a/upgrade_tests/regression_test.py b/upgrade_tests/regression_test.py
index e36e19a..ae101ec 100644
--- a/upgrade_tests/regression_test.py
+++ b/upgrade_tests/regression_test.py
@@ -105,7 +105,7 @@ class TestForRegressions(UpgradeTester):
                     if first_sstable == '' and '-Data' in new_filename:
                         first_sstable = new_filename  # we should compact this
                 mul = mul * 10
-        node1.start(wait_other_notice=True)
+        node1.start()
         sessions = self.do_upgrade(session)
         checked = False
         for is_upgraded, cursor in sessions:
diff --git a/upgrade_tests/repair_test.py b/upgrade_tests/repair_test.py
index 36eb5cc..93ec6dd 100644
--- a/upgrade_tests/repair_test.py
+++ b/upgrade_tests/repair_test.py
@@ -42,6 +42,6 @@ class TestUpgradeRepair(BaseRepairTest):
                 time.sleep(1)
                 node.stop(wait_other_notice=True)
             node.set_install_dir(install_dir=default_install_dir)
-            node.start(wait_other_notice=True, wait_for_binary_proto=True)
+            node.start(wait_for_binary_proto=True)
             cursor = self.patient_cql_connection(node)
         cluster.set_install_dir(default_install_dir)
diff --git a/upgrade_tests/storage_engine_upgrade_test.py b/upgrade_tests/storage_engine_upgrade_test.py
index a63ef85..c59984e 100644
--- a/upgrade_tests/storage_engine_upgrade_test.py
+++ b/upgrade_tests/storage_engine_upgrade_test.py
@@ -62,7 +62,7 @@ class TestStorageEngineUpgrade(Tester):
         node1.stop(wait_other_notice=True)
 
         node1.set_install_dir(install_dir=self.fixture_dtest_setup.default_install_dir)
-        node1.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node1.start(wait_for_binary_proto=True)
 
         if self.fixture_dtest_setup.bootstrap:
             cluster.set_install_dir(install_dir=self.fixture_dtest_setup.default_install_dir)
diff --git a/upgrade_tests/thrift_upgrade_test.py b/upgrade_tests/thrift_upgrade_test.py
index 8846625..878d265 100644
--- a/upgrade_tests/thrift_upgrade_test.py
+++ b/upgrade_tests/thrift_upgrade_test.py
@@ -217,7 +217,7 @@ class TestUpgradeSuperColumnsThrough(Tester):
             logger.debug('Starting %s on new version (%s)' % (node.name, tag))
             # Setup log4j / logback again (necessary moving from 2.0 -> 2.1):
             node.set_log_level("INFO")
-            node.start(wait_other_notice=True, wait_for_binary_proto=True)
+            node.start(wait_for_binary_proto=True)
             node.nodetool('upgradesstables -a')
 
     def prepare(self, num_nodes=1, cassandra_version="github:apache/cassandra-2.2"):
diff --git a/upgrade_tests/upgrade_base.py b/upgrade_tests/upgrade_base.py
index a1426eb..057bd6a 100644
--- a/upgrade_tests/upgrade_base.py
+++ b/upgrade_tests/upgrade_base.py
@@ -196,7 +196,7 @@ class UpgradeTester(Tester, metaclass=ABCMeta):
         if self.fixture_dtest_setup.enable_for_jolokia:
             remove_perf_disable_shared_mem(node1)
 
-        node1.start(wait_for_binary_proto=True, wait_other_notice=True)
+        node1.start(wait_for_binary_proto=True)
 
         sessions_and_meta = []
         if self.CL:
diff --git a/upgrade_tests/upgrade_compact_storage.py b/upgrade_tests/upgrade_compact_storage.py
index f3fe12a..559ffef 100644
--- a/upgrade_tests/upgrade_compact_storage.py
+++ b/upgrade_tests/upgrade_compact_storage.py
@@ -80,7 +80,7 @@ class TestUpgradeSuperColumnsThrough(Tester):
         node1.watch_log_for("DRAINED")
         node1.stop(wait_other_notice=False)
         node1.set_install_dir(version=VERSION_TRUNK)
-        node1.start(wait_other_notice=True, wait_for_binary_proto=True)
+        node1.start(wait_for_binary_proto=True)
 
         session = self.patient_cql_connection(node2, row_factory=dict_factory)
 
diff --git a/upgrade_tests/upgrade_supercolumns_test.py b/upgrade_tests/upgrade_supercolumns_test.py
index 40ac11c..d9fb738 100644
--- a/upgrade_tests/upgrade_supercolumns_test.py
+++ b/upgrade_tests/upgrade_supercolumns_test.py
@@ -172,5 +172,5 @@ class TestSCUpgrade(Tester):
             logger.debug('Starting %s on new version (%s)' % (node.name, tag))
             # Setup log4j / logback again (necessary moving from 2.0 -> 2.1):
             node.set_log_level("INFO")
-            node.start(wait_other_notice=True, wait_for_binary_proto=True)
+            node.start(wait_for_binary_proto=True)
             node.nodetool('upgradesstables -a')

