Posted to commits@kudu.apache.org by al...@apache.org on 2017/06/26 20:48:59 UTC

[1/2] kudu git commit: [raft_consensus-itest] re-enable TestHammerOneRow

Repository: kudu
Updated Branches:
  refs/heads/master aa0bac0cc -> 684f1fa81


[raft_consensus-itest] re-enable TestHammerOneRow

This is a follow-up patch for 44bd94268c7beb08dda88bad182e177439a55c98.

Increasing the timeout for TestWorkload's write operations fixed
the issue: no timeouts are seen after bumping the timeout from 20 to
60 seconds.
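
As a quick illustration (a minimal sketch mirroring the hunk in the diff
below, not an exact copy of the test), the re-enabled test now configures
TestWorkload with an explicit 60-second write timeout before hammering the
single row:

  TestWorkload workload(cluster_.get());
  workload.set_table_name(kTableId);
  workload.set_write_pattern(TestWorkload::UPDATE_ONE_ROW);
  // Allow each write up to 60 seconds; per the commit message the previous
  // effective timeout was 20 seconds, which slow WAL writes could exceed.
  workload.set_write_timeout_millis(60000);
  workload.set_num_write_threads(20);
  workload.Setup();
  workload.Start();
  SleepFor(MonoDelta::FromSeconds(60));
  workload.StopAndJoin();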

Change-Id: I1de18a3ff251ef9f028ae3d60c5fa3d32b1ad527
Reviewed-on: http://gerrit.cloudera.org:8080/7289
Tested-by: Kudu Jenkins
Reviewed-by: Todd Lipcon <to...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/kudu/repo
Commit: http://git-wip-us.apache.org/repos/asf/kudu/commit/d5fe274d
Tree: http://git-wip-us.apache.org/repos/asf/kudu/tree/d5fe274d
Diff: http://git-wip-us.apache.org/repos/asf/kudu/diff/d5fe274d

Branch: refs/heads/master
Commit: d5fe274dcdfa3e8716372e41266e00ccb6f769d5
Parents: aa0bac0
Author: Alexey Serbin <as...@cloudera.com>
Authored: Fri Jun 23 17:04:09 2017 -0700
Committer: Todd Lipcon <to...@apache.org>
Committed: Mon Jun 26 19:35:33 2017 +0000

----------------------------------------------------------------------
 .../integration-tests/raft_consensus-itest.cc   | 33 ++++++++++++--------
 1 file changed, 20 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/kudu/blob/d5fe274d/src/kudu/integration-tests/raft_consensus-itest.cc
----------------------------------------------------------------------
diff --git a/src/kudu/integration-tests/raft_consensus-itest.cc b/src/kudu/integration-tests/raft_consensus-itest.cc
index 7fda691..ee62c9f 100644
--- a/src/kudu/integration-tests/raft_consensus-itest.cc
+++ b/src/kudu/integration-tests/raft_consensus-itest.cc
@@ -2474,16 +2474,16 @@ TEST_F(RaftConsensusITest, TestMemoryRemainsConstantDespiteTwoDeadFollowers) {
 }
 
 static void EnableLogLatency(server::GenericServiceProxy* proxy) {
-  typedef unordered_map<string, string> FlagMap;
-  FlagMap flags;
-  InsertOrDie(&flags, "log_inject_latency", "true");
-  InsertOrDie(&flags, "log_inject_latency_ms_mean", "1000");
-  for (const FlagMap::value_type& e : flags) {
+  const unordered_map<string, string> kFlags = {
+    { "log_inject_latency",         "true" },
+    { "log_inject_latency_ms_mean", "1000" },
+  };
+  for (const auto& e : kFlags) {
     SetFlagRequestPB req;
-    SetFlagResponsePB resp;
-    RpcController rpc;
     req.set_flag(e.first);
     req.set_value(e.second);
+    SetFlagResponsePB resp;
+    RpcController rpc;
     ASSERT_OK(proxy->SetFlag(req, &resp, &rpc));
     SCOPED_TRACE(SecureDebugString(resp));
     ASSERT_EQ(SetFlagResponsePB::SUCCESS, resp.result());
@@ -2703,25 +2703,32 @@ TEST_F(RaftConsensusITest, TestSlowFollower) {
 
 // Run a special workload that constantly updates a single row on a cluster
 // where every replica is writing to its WAL slowly.
-TEST_F(RaftConsensusITest, DISABLED_TestHammerOneRow) {
+TEST_F(RaftConsensusITest, TestHammerOneRow) {
   if (!AllowSlowTests()) return;
+
   BuildAndStart(vector<string>());
 
   for (int i = 0; i < cluster_->num_tablet_servers(); i++) {
-    ExternalTabletServer* ts = cluster_->tablet_server(i);
-    TServerDetails* follower;
-    follower = GetReplicaWithUuidOrNull(tablet_id_, ts->instance_id().permanent_uuid());
-    ASSERT_TRUE(follower);
-    NO_FATALS(EnableLogLatency(follower->generic_proxy.get()));
+    const ExternalTabletServer* ts = cluster_->tablet_server(i);
+    const TServerDetails* replica = GetReplicaWithUuidOrNull(
+        tablet_id_, ts->instance_id().permanent_uuid());
+    ASSERT_NE(nullptr, replica);
+    NO_FATALS(EnableLogLatency(replica->generic_proxy.get()));
   }
 
   TestWorkload workload(cluster_.get());
   workload.set_table_name(kTableId);
   workload.set_write_pattern(TestWorkload::UPDATE_ONE_ROW);
+  workload.set_write_timeout_millis(60000);
   workload.set_num_write_threads(20);
   workload.Setup();
   workload.Start();
   SleepFor(MonoDelta::FromSeconds(60));
+  workload.StopAndJoin();
+
+  // Ensure that the replicas converge.
+  ClusterVerifier v(cluster_.get());
+  NO_FATALS(v.CheckCluster());
 }
 
 // Test that followers that fall behind the leader's log GC threshold are


[2/2] kudu git commit: [ts_itest-base] minor clean-up on BuildAndStart()

Posted by al...@apache.org.
[ts_itest-base] minor clean-up on BuildAndStart()

Minor clean-up of TabletServerIntegrationTestBase::BuildAndStart() and
its usage in raft_consensus-itest, exactly_once_writes-itest, and
kudu-admin-test.

This patch does not contain any functional changes.
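
As a sketch of the pattern applied at the call sites (illustrative only;
the actual flag lists vary per test, see the diff below), BuildAndStart()
now takes defaulted flag vectors, and callers wrap it in NO_FATALS() so
that a fatal assertion failure inside the helper also stops the calling
test:

  const vector<string> kTsFlags = {
    "--enable_leader_failure_detection=false",
  };
  const vector<string> kMasterFlags = {
    "--catalog_manager_wait_for_new_tablets_to_elect_leader=false",
  };
  NO_FATALS(BuildAndStart(kTsFlags, kMasterFlags));

  // With no custom flags, the defaulted arguments reduce the call to:
  NO_FATALS(BuildAndStart());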

Change-Id: I908db0f495170f899e47133fe877923cdf3aad21
Reviewed-on: http://gerrit.cloudera.org:8080/7290
Reviewed-by: Alexey Serbin <as...@cloudera.com>
Tested-by: Kudu Jenkins


Project: http://git-wip-us.apache.org/repos/asf/kudu/repo
Commit: http://git-wip-us.apache.org/repos/asf/kudu/commit/684f1fa8
Tree: http://git-wip-us.apache.org/repos/asf/kudu/tree/684f1fa8
Diff: http://git-wip-us.apache.org/repos/asf/kudu/diff/684f1fa8

Branch: refs/heads/master
Commit: 684f1fa81c16725f0054e4c848a6dc59cec327c5
Parents: d5fe274
Author: Alexey Serbin <as...@cloudera.com>
Authored: Fri Jun 23 17:43:45 2017 -0700
Committer: Alexey Serbin <as...@cloudera.com>
Committed: Mon Jun 26 20:48:13 2017 +0000

----------------------------------------------------------------------
 .../exactly_once_writes-itest.cc                |   2 +-
 .../integration-tests/raft_consensus-itest.cc   | 292 +++++++++++--------
 src/kudu/integration-tests/ts_itest-base.h      |   8 +-
 src/kudu/tools/kudu-admin-test.cc               |  52 ++--
 4 files changed, 205 insertions(+), 149 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/kudu/blob/684f1fa8/src/kudu/integration-tests/exactly_once_writes-itest.cc
----------------------------------------------------------------------
diff --git a/src/kudu/integration-tests/exactly_once_writes-itest.cc b/src/kudu/integration-tests/exactly_once_writes-itest.cc
index b2d88b0..03466f7 100644
--- a/src/kudu/integration-tests/exactly_once_writes-itest.cc
+++ b/src/kudu/integration-tests/exactly_once_writes-itest.cc
@@ -149,7 +149,7 @@ void ExactlyOnceSemanticsITest::DoTestWritesWithExactlyOnceSemantics(
   const int kBatchSize = 10;
   const int kNumThreadsPerReplica = 2;
 
-  BuildAndStart(ts_flags, master_flags);
+  NO_FATALS(BuildAndStart(ts_flags, master_flags));
 
   vector<TServerDetails*> tservers;
   AppendValuesFromMap(tablet_servers_, &tservers);

http://git-wip-us.apache.org/repos/asf/kudu/blob/684f1fa8/src/kudu/integration-tests/raft_consensus-itest.cc
----------------------------------------------------------------------
diff --git a/src/kudu/integration-tests/raft_consensus-itest.cc b/src/kudu/integration-tests/raft_consensus-itest.cc
index ee62c9f..51680a5 100644
--- a/src/kudu/integration-tests/raft_consensus-itest.cc
+++ b/src/kudu/integration-tests/raft_consensus-itest.cc
@@ -91,6 +91,11 @@ using master::TabletLocationsPB;
 using rpc::RpcController;
 using server::SetFlagRequestPB;
 using server::SetFlagResponsePB;
+using std::string;
+using std::unordered_map;
+using std::unordered_set;
+using std::vector;
+using strings::Substitute;
 
 static const int kConsensusRpcTimeoutForTests = 50;
 
@@ -401,7 +406,7 @@ void RaftConsensusITest::AddFlagsForLogRolls(vector<string>* extra_tserver_flags
 // Test that we can retrieve the permanent uuid of a server running
 // consensus service via RPC.
 TEST_F(RaftConsensusITest, TestGetPermanentUuid) {
-  BuildAndStart(vector<string>());
+  NO_FATALS(BuildAndStart());
 
   RaftPeerPB peer;
   TServerDetails* leader = nullptr;
@@ -422,7 +427,7 @@ TEST_F(RaftConsensusITest, TestGetPermanentUuid) {
 // from the leader and then use that id to make the replica wait
 // until it is done. This will avoid the sleeps below.
 TEST_F(RaftConsensusITest, TestInsertAndMutateThroughConsensus) {
-  BuildAndStart(vector<string>());
+  NO_FATALS(BuildAndStart());
 
   int num_iters = AllowSlowTests() ? 10 : 1;
 
@@ -436,7 +441,7 @@ TEST_F(RaftConsensusITest, TestInsertAndMutateThroughConsensus) {
 }
 
 TEST_F(RaftConsensusITest, TestFailedTransaction) {
-  BuildAndStart(vector<string>());
+  NO_FATALS(BuildAndStart());
 
   // Wait until we have a stable leader.
   ASSERT_OK(WaitForServersToAgree(MonoDelta::FromSeconds(10), tablet_servers_,
@@ -480,7 +485,7 @@ TEST_F(RaftConsensusITest, TestFailedTransaction) {
 // that steals consensus peer locks for a while. This is meant to test that
 // even with timeouts and repeated requests consensus still works.
 TEST_F(RaftConsensusITest, MultiThreadedMutateAndInsertThroughConsensus) {
-  BuildAndStart(vector<string>());
+  NO_FATALS(BuildAndStart());
 
   if (500 == FLAGS_client_inserts_per_thread) {
     if (AllowSlowTests()) {
@@ -518,7 +523,7 @@ TEST_F(RaftConsensusITest, MultiThreadedMutateAndInsertThroughConsensus) {
 }
 
 TEST_F(RaftConsensusITest, TestInsertOnNonLeader) {
-  BuildAndStart(vector<string>());
+  NO_FATALS(BuildAndStart());
 
   // Wait for the initial leader election to complete.
   ASSERT_OK(WaitForServersToAgree(MonoDelta::FromSeconds(10), tablet_servers_,
@@ -553,8 +558,7 @@ TEST_F(RaftConsensusITest, TestInsertOnNonLeader) {
 TEST_F(RaftConsensusITest, TestRunLeaderElection) {
   // Reset consensus rpc timeout to the default value or the election might fail often.
   FLAGS_consensus_rpc_timeout_ms = 1000;
-
-  BuildAndStart(vector<string>());
+  NO_FATALS(BuildAndStart());
 
   int num_iters = AllowSlowTests() ? 10 : 1;
 
@@ -626,7 +630,7 @@ void RaftConsensusITest::Write128KOpsToLeader(int num_writes) {
 // Also asserts that the other replicas retain logs for the stopped
 // follower to catch up from.
 TEST_F(RaftConsensusITest, TestCatchupAfterOpsEvicted) {
-  vector<string> extra_flags = {
+  const vector<string> kTsFlags = {
     "--log_cache_size_limit_mb=1",
     "--consensus_max_batch_size_bytes=500000",
     // Use short and synchronous rolls so that we can test log segment retention.
@@ -641,7 +645,8 @@ TEST_F(RaftConsensusITest, TestCatchupAfterOpsEvicted) {
     // And disable WAL compression so the 128KB cells don't get compressed away.
     "--log_compression_codec=none"
   };
-  BuildAndStart(extra_flags);
+
+  NO_FATALS(BuildAndStart(kTsFlags));
   TServerDetails* replica = (*tablet_replicas_.begin()).second;
   ASSERT_TRUE(replica != nullptr);
   ExternalTabletServer* replica_ets = cluster_->tablet_server_by_uuid(replica->uuid());
@@ -778,15 +783,14 @@ void RaftConsensusITest::CauseFollowerToFallBehindLogGC(string* leader_uuid,
 //
 // This is a regression test for KUDU-775 and KUDU-562.
 TEST_F(RaftConsensusITest, TestFollowerFallsBehindLeaderGC) {
-  vector<string> extra_flags = {
+  vector<string> ts_flags = {
     // Disable follower eviction to maintain the original intent of this test.
     "--evict_failed_followers=false",
     // We write 128KB cells in this test, so bump the limit.
     "--max_cell_size_bytes=1000000"
   };
-
-  AddFlagsForLogRolls(&extra_flags); // For CauseFollowerToFallBehindLogGC().
-  BuildAndStart(extra_flags);
+  AddFlagsForLogRolls(&ts_flags); // For CauseFollowerToFallBehindLogGC().
+  NO_FATALS(BuildAndStart(ts_flags));
 
   string leader_uuid;
   int64_t orig_term;
@@ -1048,8 +1052,7 @@ TEST_F(RaftConsensusITest, MultiThreadedInsertWithFailovers) {
   // Start a 7 node configuration cluster (since we can't bring leaders back we start with a
   // higher replica count so that we kill more leaders).
 
-  vector<string> flags;
-  BuildAndStart(flags);
+  NO_FATALS(BuildAndStart());
 
   OverrideFlagForSlowTests(
       "client_inserts_per_thread",
@@ -1100,7 +1103,7 @@ TEST_F(RaftConsensusITest, TestAutomaticLeaderElection) {
     FLAGS_num_tablet_servers = 5;
     FLAGS_num_replicas = 5;
   }
-  BuildAndStart(vector<string>());
+  NO_FATALS(BuildAndStart());
 
   TServerDetails* leader;
   ASSERT_OK(GetLeaderReplicaWithRetries(tablet_id_, &leader));
@@ -1197,7 +1200,7 @@ void RaftConsensusITest::StubbornlyWriteSameRowThread(int replica_idx, const Ato
 TEST_F(RaftConsensusITest, TestKUDU_597) {
   FLAGS_num_replicas = 3;
   FLAGS_num_tablet_servers = 3;
-  BuildAndStart(vector<string>());
+  NO_FATALS(BuildAndStart());
 
   AtomicBool finish(false);
   for (int i = 0; i < FLAGS_num_tablet_servers; i++) {
@@ -1242,14 +1245,18 @@ void RaftConsensusITest::AddOpWithTypeAndKey(const OpId& id,
 }
 
 void RaftConsensusITest::SetupSingleReplicaTest(TServerDetails** replica_ts) {
+  const vector<string> kTsFlags = {
+    // Don't use the hybrid clock as we set logical timestamps on ops.
+    "--use_hybrid_clock=false",
+    "--enable_leader_failure_detection=false",
+  };
+  const vector<string> kMasterFlags = {
+    "--catalog_manager_wait_for_new_tablets_to_elect_leader=false"
+  };
+
   FLAGS_num_replicas = 3;
   FLAGS_num_tablet_servers = 3;
-  vector<string> ts_flags, master_flags;
-  // Don't use the hybrid clock as we set logical timestamps on ops.
-  ts_flags.emplace_back("--use_hybrid_clock=false");
-  ts_flags.emplace_back("--enable_leader_failure_detection=false");
-  master_flags.emplace_back("--catalog_manager_wait_for_new_tablets_to_elect_leader=false");
-  BuildAndStart(ts_flags, master_flags);
+  NO_FATALS(BuildAndStart(kTsFlags, kMasterFlags));
 
   // Kill all the servers but one.
   vector<TServerDetails*> tservers;
@@ -1583,13 +1590,16 @@ TEST_F(RaftConsensusITest, TestReplicaBehaviorViaRPC) {
 }
 
 TEST_F(RaftConsensusITest, TestLeaderStepDown) {
+  const vector<string> kTsFlags = {
+    "--enable_leader_failure_detection=false"
+  };
+  const vector<string> kMasterFlags = {
+    "--catalog_manager_wait_for_new_tablets_to_elect_leader=false"
+  };
+
   FLAGS_num_replicas = 3;
   FLAGS_num_tablet_servers = 3;
-
-  vector<string> ts_flags, master_flags;
-  ts_flags.emplace_back("--enable_leader_failure_detection=false");
-  master_flags.emplace_back("--catalog_manager_wait_for_new_tablets_to_elect_leader=false");
-  BuildAndStart(ts_flags, master_flags);
+  NO_FATALS(BuildAndStart(kTsFlags, kMasterFlags));
 
   vector<TServerDetails*> tservers;
   AppendValuesFromMap(tablet_servers_, &tservers);
@@ -1623,16 +1633,17 @@ TEST_F(RaftConsensusITest, TestLeaderStepDown) {
 // Prior to fixing KUDU-699, the step-down process would block
 // until the pending requests timed out.
 TEST_F(RaftConsensusITest, TestStepDownWithSlowFollower) {
-  vector<string> ts_flags = {
+  const vector<string> kTsFlags = {
     "--enable_leader_failure_detection=false",
     // Bump up the RPC timeout, so that we can verify that the stepdown responds
     // quickly even when an outbound request is hung.
     "--consensus_rpc_timeout_ms=15000"
   };
-  vector<string> master_flags = {
+  const vector<string> kMasterFlags = {
     "--catalog_manager_wait_for_new_tablets_to_elect_leader=false"
   };
-  BuildAndStart(ts_flags, master_flags);
+
+  NO_FATALS(BuildAndStart(kTsFlags, kMasterFlags));
 
   vector<TServerDetails*> tservers;
   AppendValuesFromMap(tablet_servers_, &tservers);
@@ -1723,13 +1734,18 @@ void RaftConsensusITest::AssertMajorityRequiredForElectionsAndWrites(
 
 // Basic test of adding and removing servers from a configuration.
 TEST_F(RaftConsensusITest, TestAddRemoveServer) {
-  MonoDelta kTimeout = MonoDelta::FromSeconds(10);
+  const MonoDelta kTimeout = MonoDelta::FromSeconds(10);
+  const vector<string> kTsFlags = {
+    "--enable_leader_failure_detection=false",
+  };
+  const vector<string> kMasterFlags = {
+    "--master_add_server_when_underreplicated=false",
+    "--catalog_manager_wait_for_new_tablets_to_elect_leader=false",
+  };
+
   FLAGS_num_tablet_servers = 3;
   FLAGS_num_replicas = 3;
-  vector<string> ts_flags = { "--enable_leader_failure_detection=false" };
-  vector<string> master_flags = { "--master_add_server_when_underreplicated=false" };
-  master_flags.emplace_back("--catalog_manager_wait_for_new_tablets_to_elect_leader=false");
-  NO_FATALS(BuildAndStart(ts_flags, master_flags));
+  NO_FATALS(BuildAndStart(kTsFlags, kMasterFlags));
 
   vector<TServerDetails*> tservers;
   AppendValuesFromMap(tablet_servers_, &tservers);
@@ -1805,12 +1821,17 @@ TEST_F(RaftConsensusITest, TestAddRemoveServer) {
 // Regression test for KUDU-1169: a crash when a Config Change operation is replaced
 // by a later leader.
 TEST_F(RaftConsensusITest, TestReplaceChangeConfigOperation) {
+  const vector<string> kTsFlags = {
+    "--enable_leader_failure_detection=false",
+  };
+  const vector<string> kMasterFlags = {
+    "--master_add_server_when_underreplicated=false",
+    "--catalog_manager_wait_for_new_tablets_to_elect_leader=false",
+  };
+
   FLAGS_num_tablet_servers = 3;
   FLAGS_num_replicas = 3;
-  vector<string> ts_flags = { "--enable_leader_failure_detection=false" };
-  vector<string> master_flags = { "--master_add_server_when_underreplicated=false" };
-  master_flags.emplace_back("--catalog_manager_wait_for_new_tablets_to_elect_leader=false");
-  NO_FATALS(BuildAndStart(ts_flags, master_flags));
+  NO_FATALS(BuildAndStart(kTsFlags, kMasterFlags));
 
   vector<TServerDetails*> tservers;
   AppendValuesFromMap(tablet_servers_, &tservers);
@@ -1871,12 +1892,17 @@ TEST_F(RaftConsensusITest, TestReplaceChangeConfigOperation) {
 
 // Test the atomic CAS arguments to ChangeConfig() add server and remove server.
 TEST_F(RaftConsensusITest, TestAtomicAddRemoveServer) {
+  const vector<string> kTsFlags = {
+    "--enable_leader_failure_detection=false",
+  };
+  const vector<string> kMasterFlags = {
+    "--master_add_server_when_underreplicated=false",
+    "--catalog_manager_wait_for_new_tablets_to_elect_leader=false",
+  };
+
   FLAGS_num_tablet_servers = 3;
   FLAGS_num_replicas = 3;
-  vector<string> ts_flags = { "--enable_leader_failure_detection=false" };
-  vector<string> master_flags = { "--master_add_server_when_underreplicated=false" };
-  master_flags.emplace_back("--catalog_manager_wait_for_new_tablets_to_elect_leader=false");
-  NO_FATALS(BuildAndStart(ts_flags, master_flags));
+  NO_FATALS(BuildAndStart(kTsFlags, kMasterFlags));
 
   vector<TServerDetails*> tservers;
   AppendValuesFromMap(tablet_servers_, &tservers);
@@ -1956,12 +1982,16 @@ TEST_F(RaftConsensusITest, TestElectPendingVoter) {
   //  8. Start a leader election on the new (pending) node. It should win.
   //  9. Unpause the two remaining stopped nodes.
   // 10. Wait for all nodes to sync to the new leader's log.
+  const vector<string> kTsFlags = {
+    "--enable_leader_failure_detection=false",
+  };
+  const vector<string> kMasterFlags = {
+    "--catalog_manager_wait_for_new_tablets_to_elect_leader=false",
+  };
+
   FLAGS_num_tablet_servers = 5;
   FLAGS_num_replicas = 5;
-  vector<string> ts_flags, master_flags;
-  ts_flags.emplace_back("--enable_leader_failure_detection=false");
-  master_flags.emplace_back("--catalog_manager_wait_for_new_tablets_to_elect_leader=false");
-  BuildAndStart(ts_flags, master_flags);
+  NO_FATALS(BuildAndStart(kTsFlags, kMasterFlags));
 
   vector<TServerDetails*> tservers;
   AppendValuesFromMap(tablet_servers_, &tservers);
@@ -2068,12 +2098,17 @@ void DoWriteTestRows(const TServerDetails* leader_tserver,
 
 // Test that config change works while running a workload.
 TEST_F(RaftConsensusITest, TestConfigChangeUnderLoad) {
+  const vector<string> kTsFlags = {
+    "--enable_leader_failure_detection=false",
+  };
+  const vector<string> kMasterFlags = {
+    "--master_add_server_when_underreplicated=false",
+    "--catalog_manager_wait_for_new_tablets_to_elect_leader=false",
+  };
+
   FLAGS_num_tablet_servers = 3;
   FLAGS_num_replicas = 3;
-  vector<string> ts_flags = { "--enable_leader_failure_detection=false" };
-  vector<string> master_flags = { "--master_add_server_when_underreplicated=false" };
-  master_flags.emplace_back("--catalog_manager_wait_for_new_tablets_to_elect_leader=false");
-  BuildAndStart(ts_flags, master_flags);
+  NO_FATALS(BuildAndStart(kTsFlags, kMasterFlags));
 
   vector<TServerDetails*> tservers;
   AppendValuesFromMap(tablet_servers_, &tservers);
@@ -2158,12 +2193,14 @@ TEST_F(RaftConsensusITest, TestConfigChangeUnderLoad) {
 
 TEST_F(RaftConsensusITest, TestMasterNotifiedOnConfigChange) {
   MonoDelta timeout = MonoDelta::FromSeconds(30);
+  const vector<string> kMasterFlags = {
+    "--master_add_server_when_underreplicated=false",
+    "--allow_unsafe_replication_factor=true",
+  };
+
   FLAGS_num_tablet_servers = 3;
   FLAGS_num_replicas = 2;
-  vector<string> ts_flags;
-  vector<string> master_flags = { "--master_add_server_when_underreplicated=false",
-                                  "--allow_unsafe_replication_factor=true"};
-  NO_FATALS(BuildAndStart(ts_flags, master_flags));
+  NO_FATALS(BuildAndStart({}, kMasterFlags));
 
   LOG(INFO) << "Finding tablet leader and waiting for things to start...";
   string tablet_id = tablet_replicas_.begin()->first;
@@ -2240,26 +2277,28 @@ TEST_F(RaftConsensusITest, TestEarlyCommitDespiteMemoryPressure) {
 
   // Set up a 3-node configuration with only one live follower so that we can
   // manipulate it directly via RPC.
-  vector<string> ts_flags, master_flags;
-
-  // If failure detection were on, a follower could be elected as leader after
-  // we kill the leader below.
-  ts_flags.emplace_back("--enable_leader_failure_detection=false");
-  master_flags.emplace_back("--catalog_manager_wait_for_new_tablets_to_elect_leader=false");
-
-  // Very low memory limit to ease testing.
-  // When using tcmalloc, we set it to 30MB, since we can get accurate process memory
-  // usage statistics. Otherwise, set to only 4MB, since we'll only be throttling based
-  // on our tracked memory.
+  const vector<string> kTsFlags = {
+    // Very low memory limit to ease testing.
+    // When using tcmalloc, we set it to 30MB, since we can get accurate process memory
+    // usage statistics. Otherwise, set to only 4MB, since we'll only be throttling based
+    // on our tracked memory.
 #ifdef TCMALLOC_ENABLED
-  ts_flags.emplace_back("--memory_limit_hard_bytes=30000000");
+    "--memory_limit_hard_bytes=30000000",
 #else
-  ts_flags.push_back("--memory_limit_hard_bytes=4194304");
+    "--memory_limit_hard_bytes=4194304",
 #endif
-  // Don't let transaction memory tracking get in the way.
-  ts_flags.emplace_back("--tablet_transaction_memory_limit_mb=-1");
+    "--enable_leader_failure_detection=false",
+    // Don't let transaction memory tracking get in the way.
+    "--tablet_transaction_memory_limit_mb=-1",
+  };
 
-  BuildAndStart(ts_flags, master_flags);
+  // If failure detection were on, a follower could be elected as leader after
+  // we kill the leader below.
+  const vector<string> kMasterFlags = {
+    "--catalog_manager_wait_for_new_tablets_to_elect_leader=false",
+  };
+
+  NO_FATALS(BuildAndStart(kTsFlags, kMasterFlags));
 
   // Elect server 2 as leader, then kill it and server 1, leaving behind
   // server 0 as the sole follower.
@@ -2322,28 +2361,25 @@ TEST_F(RaftConsensusITest, TestEarlyCommitDespiteMemoryPressure) {
 
 // Test that we can create (vivify) a new tablet via tablet copy.
 TEST_F(RaftConsensusITest, TestAutoCreateReplica) {
-  FLAGS_num_tablet_servers = 3;
-  FLAGS_num_replicas = 2;
-
-  vector<string> ts_flags = {
-      "--enable_leader_failure_detection=false",
-      "--log_cache_size_limit_mb=1",
-      "--log_segment_size_mb=1",
-      "--log_async_preallocate_segments=false",
-      "--flush_threshold_mb=1",
-      "--maintenance_manager_polling_interval_ms=300",
+  const vector<string> kTsFlags = {
+    "--enable_leader_failure_detection=false",
+    "--log_cache_size_limit_mb=1",
+    "--log_segment_size_mb=1",
+    "--log_async_preallocate_segments=false",
+    "--flush_threshold_mb=1",
+    "--maintenance_manager_polling_interval_ms=300",
   };
-  vector<string> master_flags = {
-      "--catalog_manager_wait_for_new_tablets_to_elect_leader=false",
-      "--allow_unsafe_replication_factor=true"
+  const vector<string> kMasterFlags = {
+    "--catalog_manager_wait_for_new_tablets_to_elect_leader=false",
+    "--allow_unsafe_replication_factor=true",
   };
-  BuildAndStart(ts_flags, master_flags);
+
+  FLAGS_num_tablet_servers = 3;
+  FLAGS_num_replicas = 2;
+  NO_FATALS(BuildAndStart(kTsFlags, kMasterFlags));
 
   // 50K is enough to cause flushes & log rolls.
-  int num_rows_to_write = 50000;
-  if (AllowSlowTests()) {
-    num_rows_to_write = 150000;
-  }
+  const int num_rows_to_write = AllowSlowTests() ? 150000 : 50000;
 
   vector<TServerDetails*> tservers;
   AppendValuesFromMap(tablet_servers_, &tservers);
@@ -2422,9 +2458,7 @@ TEST_F(RaftConsensusITest, TestMemoryRemainsConstantDespiteTwoDeadFollowers) {
 
   // Start the cluster with a low per-tablet transaction memory limit, so that
   // the test can complete faster.
-  vector<string> flags;
-  flags.emplace_back("--tablet_transaction_memory_limit_mb=2");
-  BuildAndStart(flags);
+  NO_FATALS(BuildAndStart({ "--tablet_transaction_memory_limit_mb=2" }));
 
   // Kill both followers.
   TServerDetails* details;
@@ -2493,7 +2527,8 @@ static void EnableLogLatency(server::GenericServiceProxy* proxy) {
 // Run a regular workload with a leader that's writing to its WAL slowly.
 TEST_F(RaftConsensusITest, TestSlowLeader) {
   if (!AllowSlowTests()) return;
-  BuildAndStart(vector<string>());
+
+  NO_FATALS(BuildAndStart());
 
   TServerDetails* leader;
   ASSERT_OK(GetLeaderReplicaWithRetries(tablet_id_, &leader));
@@ -2509,14 +2544,15 @@ TEST_F(RaftConsensusITest, TestSlowLeader) {
 
 // Test write batches just below the maximum limit.
 TEST_F(RaftConsensusITest, TestLargeBatches) {
-  vector<string> ts_flags = {
+  const vector<string> kTsFlags = {
     // We write 128KB cells in this test, so bump the limit, and disable compression.
     "--max_cell_size_bytes=1000000",
     "--log_segment_size_mb=1",
     "--log_compression_codec=none",
     "--log_min_segments_to_retain=100", // disable GC of logs.
   };
-  BuildAndStart(ts_flags);
+
+  NO_FATALS(BuildAndStart(kTsFlags));
 
   const int64_t kBatchSize = 40; // Write 40 * 128kb = 5MB per batch.
   const int64_t kNumBatchesToWrite = 100;
@@ -2592,7 +2628,7 @@ TEST_F(RaftConsensusITest, TestCommitIndexFarBehindAfterLeaderElection) {
   // Set the batch size low so that, after the new leader takes
   // over below, the ops required to catch up from the committed index
   // to the newly replicated index don't fit into a single batch.
-  BuildAndStart({"--consensus_max_batch_size_bytes=50000"});
+  NO_FATALS(BuildAndStart({"--consensus_max_batch_size_bytes=50000"}));
 
   // Get the leader and the two replica tablet servers.
   // These will have the following roles in this test:
@@ -2675,7 +2711,8 @@ TEST_F(RaftConsensusITest, TestCommitIndexFarBehindAfterLeaderElection) {
 // Run a regular workload with one follower that's writing to its WAL slowly.
 TEST_F(RaftConsensusITest, TestSlowFollower) {
   if (!AllowSlowTests()) return;
-  BuildAndStart(vector<string>());
+
+  NO_FATALS(BuildAndStart());
 
   TServerDetails* leader;
   ASSERT_OK(GetLeaderReplicaWithRetries(tablet_id_, &leader));
@@ -2706,7 +2743,7 @@ TEST_F(RaftConsensusITest, TestSlowFollower) {
 TEST_F(RaftConsensusITest, TestHammerOneRow) {
   if (!AllowSlowTests()) return;
 
-  BuildAndStart(vector<string>());
+  NO_FATALS(BuildAndStart());
 
   for (int i = 0; i < cluster_->num_tablet_servers(); i++) {
     const ExternalTabletServer* ts = cluster_->tablet_server(i);
@@ -2769,7 +2806,7 @@ TEST_F(RaftConsensusITest, TestMasterReplacesEvictedFollowers) {
     "--max_cell_size_bytes=1000000"
   };
   AddFlagsForLogRolls(&ts_flags); // For CauseFollowerToFallBehindLogGC().
-  BuildAndStart(ts_flags);
+  NO_FATALS(BuildAndStart(ts_flags));
 
   MonoDelta timeout = MonoDelta::FromSeconds(30);
 
@@ -2792,9 +2829,14 @@ TEST_F(RaftConsensusITest, TestMasterReplacesEvictedFollowers) {
 // This is required for correctness of Raft config change. For details,
 // see https://groups.google.com/forum/#!topic/raft-dev/t4xj6dJTP6E
 TEST_F(RaftConsensusITest, TestChangeConfigRejectedUnlessNoopReplicated) {
-  vector<string> ts_flags = { "--enable_leader_failure_detection=false" };
-  vector<string> master_flags = { "--catalog_manager_wait_for_new_tablets_to_elect_leader=false" };
-  BuildAndStart(ts_flags, master_flags);
+  const vector<string> kTsFlags = {
+    "--enable_leader_failure_detection=false",
+  };
+  const vector<string> kMasterFlags = {
+    "--catalog_manager_wait_for_new_tablets_to_elect_leader=false",
+  };
+
+  NO_FATALS(BuildAndStart(kTsFlags, kMasterFlags));
 
   MonoDelta timeout = MonoDelta::FromSeconds(30);
 
@@ -2829,10 +2871,15 @@ TEST_F(RaftConsensusITest, TestChangeConfigRejectedUnlessNoopReplicated) {
 // config change operation is aborted during tablet deletion when that config change
 // was in fact already persisted to disk.
 TEST_F(RaftConsensusITest, Test_KUDU_1735) {
-  MonoDelta kTimeout = MonoDelta::FromSeconds(10);
-  vector<string> ts_flags = { "--enable_leader_failure_detection=false" };
-  vector<string> master_flags = { "--catalog_manager_wait_for_new_tablets_to_elect_leader=false" };
-  NO_FATALS(BuildAndStart(ts_flags, master_flags));
+  const MonoDelta kTimeout = MonoDelta::FromSeconds(10);
+  const vector<string> kTsFlags = {
+    "--enable_leader_failure_detection=false",
+  };
+  const vector<string> kMasterFlags = {
+    "--catalog_manager_wait_for_new_tablets_to_elect_leader=false",
+  };
+
+  NO_FATALS(BuildAndStart(kTsFlags, kMasterFlags));
 
   vector<TServerDetails*> tservers;
   vector<ExternalTabletServer*> external_tservers;
@@ -2877,11 +2924,14 @@ TEST_F(RaftConsensusITest, Test_KUDU_1735) {
 // back as an error in UpdateConsensus().
 TEST_F(RaftConsensusITest, TestUpdateConsensusErrorNonePrepared) {
   const int kNumOps = 10;
+  vector<string> kTsFlags = {
+    "--enable_leader_failure_detection=false",
+  };
+  const vector<string> kMasterFlags = {
+    "--catalog_manager_wait_for_new_tablets_to_elect_leader=false",
+  };
 
-  vector<string> ts_flags, master_flags;
-  ts_flags.emplace_back("--enable_leader_failure_detection=false");
-  master_flags.emplace_back("--catalog_manager_wait_for_new_tablets_to_elect_leader=false");
-  BuildAndStart(ts_flags, master_flags);
+  NO_FATALS(BuildAndStart(kTsFlags, kMasterFlags));
 
   vector<TServerDetails*> tservers;
   AppendValuesFromMap(tablet_servers_, &tservers);
@@ -2922,7 +2972,7 @@ TEST_F(RaftConsensusITest, TestUpdateConsensusErrorNonePrepared) {
 // doesn't crash, but instead just marks the tablet as corrupt.
 TEST_F(RaftConsensusITest, TestCorruptReplicaMetadata) {
   // Start cluster and wait until we have a stable leader.
-  BuildAndStart({}, {});
+  NO_FATALS(BuildAndStart());
   ASSERT_OK(WaitForServersToAgree(MonoDelta::FromSeconds(10), tablet_servers_,
                                   tablet_id_, 1));
 
@@ -2962,15 +3012,19 @@ TEST_F(RaftConsensusITest, TestCorruptReplicaMetadata) {
 // First, we test that failed replicates are fatal. Then, we test that failed
 // commits are fatal.
 TEST_F(RaftConsensusITest, TestLogIOErrorIsFatal) {
+  const vector<string> kTsFlags = {
+    "--enable_leader_failure_detection=false",
+    // Disable core dumps since we will inject FATAL errors, and dumping
+    // core can take a long time.
+    "--disable_core_dumps",
+  };
+  const vector<string> kMasterFlags = {
+    "--catalog_manager_wait_for_new_tablets_to_elect_leader=false",
+  };
+
   FLAGS_num_replicas = 3;
   FLAGS_num_tablet_servers = 3;
-  vector<string> ts_flags, master_flags;
-  ts_flags = {"--enable_leader_failure_detection=false",
-              // Disable core dumps since we will inject FATAL errors, and dumping
-              // core can take a long time.
-              "--disable_core_dumps"};
-  master_flags.emplace_back("--catalog_manager_wait_for_new_tablets_to_elect_leader=false");
-  NO_FATALS(BuildAndStart(ts_flags, master_flags));
+  NO_FATALS(BuildAndStart(kTsFlags, kMasterFlags));
 
   vector<TServerDetails*> tservers;
   AppendValuesFromMap(tablet_servers_, &tservers);

http://git-wip-us.apache.org/repos/asf/kudu/blob/684f1fa8/src/kudu/integration-tests/ts_itest-base.h
----------------------------------------------------------------------
diff --git a/src/kudu/integration-tests/ts_itest-base.h b/src/kudu/integration-tests/ts_itest-base.h
index 7b223f8..5e4feaa 100644
--- a/src/kudu/integration-tests/ts_itest-base.h
+++ b/src/kudu/integration-tests/ts_itest-base.h
@@ -507,13 +507,13 @@ class TabletServerIntegrationTestBase : public TabletServerTestBase {
   // Starts an external cluster with a single tablet and a number of replicas equal
   // to 'FLAGS_num_replicas'. The caller can pass 'ts_flags' to specify non-default
   // flags to pass to the tablet servers.
-  void BuildAndStart(const std::vector<std::string>& ts_flags = std::vector<std::string>(),
-                     const std::vector<std::string>& master_flags = std::vector<std::string>()) {
-    CreateCluster("raft_consensus-itest-cluster", ts_flags, master_flags);
+  void BuildAndStart(const std::vector<std::string>& ts_flags = {},
+                     const std::vector<std::string>& master_flags = {}) {
+    NO_FATALS(CreateCluster("raft_consensus-itest-cluster", ts_flags, master_flags));
     NO_FATALS(CreateClient(&client_));
     NO_FATALS(CreateTable());
     WaitForTSAndReplicas();
-    CHECK_GT(tablet_replicas_.size(), 0);
+    ASSERT_FALSE(tablet_replicas_.empty());
     tablet_id_ = (*tablet_replicas_.begin()).first;
   }
 

http://git-wip-us.apache.org/repos/asf/kudu/blob/684f1fa8/src/kudu/tools/kudu-admin-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/tools/kudu-admin-test.cc b/src/kudu/tools/kudu-admin-test.cc
index e1c8841..376d9c1 100644
--- a/src/kudu/tools/kudu-admin-test.cc
+++ b/src/kudu/tools/kudu-admin-test.cc
@@ -67,9 +67,10 @@ class AdminCliTest : public tserver::TabletServerIntegrationTestBase {
 TEST_F(AdminCliTest, TestChangeConfig) {
   FLAGS_num_tablet_servers = 3;
   FLAGS_num_replicas = 2;
-  BuildAndStart({ "--enable_leader_failure_detection=false" },
-                { "--catalog_manager_wait_for_new_tablets_to_elect_leader=false",
-                  "--allow_unsafe_replication_factor=true"});
+  NO_FATALS(BuildAndStart(
+      { "--enable_leader_failure_detection=false" },
+      { "--catalog_manager_wait_for_new_tablets_to_elect_leader=false",
+        "--allow_unsafe_replication_factor=true"}));
 
   vector<TServerDetails*> tservers;
   AppendValuesFromMap(tablet_servers_, &tservers);
@@ -185,7 +186,7 @@ Status RunUnsafeChangeConfig(const string& tablet_id,
 // 5. Bring up leader and follower1 and verify replicas are deleted.
 // 6. Verify that new config doesn't contain old leader and follower1.
 TEST_F(AdminCliTest, TestUnsafeChangeConfigOnSingleFollower) {
-  MonoDelta kTimeout = MonoDelta::FromSeconds(30);
+  const MonoDelta kTimeout = MonoDelta::FromSeconds(30);
   FLAGS_num_tablet_servers = 5;
   FLAGS_num_replicas = 3;
   // tserver_unresponsive_timeout_ms is useful so that master considers
@@ -279,10 +280,10 @@ TEST_F(AdminCliTest, TestUnsafeChangeConfigOnSingleFollower) {
 // 4. Wait until the new config is populated on leader and master.
 // 5. Verify that new config does not contain old followers.
 TEST_F(AdminCliTest, TestUnsafeChangeConfigOnSingleLeader) {
-  MonoDelta kTimeout = MonoDelta::FromSeconds(30);
+  const MonoDelta kTimeout = MonoDelta::FromSeconds(30);
   FLAGS_num_tablet_servers = 5;
   FLAGS_num_replicas = 3;
-  NO_FATALS(BuildAndStart({}, {}));
+  NO_FATALS(BuildAndStart());
 
   // Determine the list of tablet servers currently in the config.
   TabletServerMap active_tablet_servers;
@@ -363,10 +364,10 @@ TEST_F(AdminCliTest, TestUnsafeChangeConfigOnSingleLeader) {
 // 4. Wait until the new config is populated on new_leader and master.
 // 5. Verify that new config does not contain old leader.
 TEST_F(AdminCliTest, TestUnsafeChangeConfigForConfigWithTwoNodes) {
-  MonoDelta kTimeout = MonoDelta::FromSeconds(30);
+  const MonoDelta kTimeout = MonoDelta::FromSeconds(30);
   FLAGS_num_tablet_servers = 4;
   FLAGS_num_replicas = 3;
-  NO_FATALS(BuildAndStart({}, {}));
+  NO_FATALS(BuildAndStart());
 
   // Determine the list of tablet servers currently in the config.
   TabletServerMap active_tablet_servers;
@@ -446,11 +447,12 @@ TEST_F(AdminCliTest, TestUnsafeChangeConfigForConfigWithTwoNodes) {
 // 4. Wait until the new config is populated on new_leader and master.
 // 5. Verify that new config does not contain old leader and old followers.
 TEST_F(AdminCliTest, TestUnsafeChangeConfigWithFiveReplicaConfig) {
-  MonoDelta kTimeout = MonoDelta::FromSeconds(30);
+  const MonoDelta kTimeout = MonoDelta::FromSeconds(30);
+
+  // Retire the dead servers early with these settings.
   FLAGS_num_tablet_servers = 8;
   FLAGS_num_replicas = 5;
-  // Retire the dead servers early with these settings.
-  NO_FATALS(BuildAndStart({}, {}));
+  NO_FATALS(BuildAndStart());
 
   vector<TServerDetails*> tservers;
   vector<ExternalTabletServer*> external_tservers;
@@ -541,10 +543,10 @@ TEST_F(AdminCliTest, TestUnsafeChangeConfigWithFiveReplicaConfig) {
 // 6. Verify that new config does not contain old followers and a standby node
 //    has populated the new config.
 TEST_F(AdminCliTest, TestUnsafeChangeConfigLeaderWithPendingConfig) {
-  MonoDelta kTimeout = MonoDelta::FromSeconds(30);
+  const MonoDelta kTimeout = MonoDelta::FromSeconds(30);
   FLAGS_num_tablet_servers = 5;
   FLAGS_num_replicas = 3;
-  NO_FATALS(BuildAndStart({}, {}));
+  NO_FATALS(BuildAndStart());
 
   // Determine the list of tablet servers currently in the config.
   TabletServerMap active_tablet_servers;
@@ -638,10 +640,10 @@ TEST_F(AdminCliTest, TestUnsafeChangeConfigLeaderWithPendingConfig) {
 // 7. Verify that new config does not contain old followers and a standby node
 //    has populated the new config.
 TEST_F(AdminCliTest, TestUnsafeChangeConfigFollowerWithPendingConfig) {
-  MonoDelta kTimeout = MonoDelta::FromSeconds(30);
+  const MonoDelta kTimeout = MonoDelta::FromSeconds(30);
   FLAGS_num_tablet_servers = 5;
   FLAGS_num_replicas = 3;
-  NO_FATALS(BuildAndStart({}, {}));
+  NO_FATALS(BuildAndStart());
 
   // Determine the list of tablet servers currently in the config.
   TabletServerMap active_tablet_servers;
@@ -744,10 +746,10 @@ TEST_F(AdminCliTest, TestUnsafeChangeConfigFollowerWithPendingConfig) {
 // 6. Shutdown and restart the leader and verify that tablet bootstrapped on leader.
 // 7. Verify that a new node has populated the new config with 3 voters.
 TEST_F(AdminCliTest, TestUnsafeChangeConfigWithPendingConfigsOnWAL) {
-  MonoDelta kTimeout = MonoDelta::FromSeconds(30);
+  const MonoDelta kTimeout = MonoDelta::FromSeconds(30);
   FLAGS_num_tablet_servers = 5;
   FLAGS_num_replicas = 3;
-  NO_FATALS(BuildAndStart({}, {}));
+  NO_FATALS(BuildAndStart());
 
   // Determine the list of tablet servers currently in the config.
   TabletServerMap active_tablet_servers;
@@ -849,11 +851,11 @@ TEST_F(AdminCliTest, TestUnsafeChangeConfigWithPendingConfigsOnWAL) {
 // 4. Wait until the new config is populated on the master and the new leader.
 // 5. Verify that new config does not contain old followers.
 TEST_F(AdminCliTest, TestUnsafeChangeConfigWithMultiplePendingConfigs) {
-  MonoDelta kTimeout = MonoDelta::FromSeconds(30);
+  const MonoDelta kTimeout = MonoDelta::FromSeconds(30);
   FLAGS_num_tablet_servers = 9;
   FLAGS_num_replicas = 5;
   // Retire the dead servers early with these settings.
-  NO_FATALS(BuildAndStart({}, {}));
+  NO_FATALS(BuildAndStart());
 
   vector<TServerDetails*> tservers;
   vector<ExternalTabletServer*> external_tservers;
@@ -970,7 +972,7 @@ Status GetTermFromConsensus(const vector<TServerDetails*>& tservers,
 TEST_F(AdminCliTest, TestLeaderStepDown) {
   FLAGS_num_tablet_servers = 3;
   FLAGS_num_replicas = 3;
-  BuildAndStart({}, {});
+  NO_FATALS(BuildAndStart());
 
   vector<TServerDetails*> tservers;
   AppendValuesFromMap(tablet_servers_, &tservers);
@@ -1011,9 +1013,9 @@ TEST_F(AdminCliTest, TestLeaderStepDown) {
 TEST_F(AdminCliTest, TestLeaderStepDownWhenNotPresent) {
   FLAGS_num_tablet_servers = 3;
   FLAGS_num_replicas = 3;
-  BuildAndStart(
+  NO_FATALS(BuildAndStart(
       { "--enable_leader_failure_detection=false" },
-      { "--catalog_manager_wait_for_new_tablets_to_elect_leader=false" });
+      { "--catalog_manager_wait_for_new_tablets_to_elect_leader=false" }));
   vector<TServerDetails*> tservers;
   AppendValuesFromMap(tablet_servers_, &tservers);
   ASSERT_EQ(FLAGS_num_tablet_servers, tservers.size());
@@ -1042,7 +1044,7 @@ TEST_F(AdminCliTest, TestLeaderStepDownWhenNotPresent) {
 TEST_F(AdminCliTest, TestDeleteTable) {
   FLAGS_num_tablet_servers = 1;
   FLAGS_num_replicas = 1;
-  BuildAndStart({}, {});
+  NO_FATALS(BuildAndStart());
 
   string master_address = cluster_->master()->bound_rpc_addr().ToString();
   shared_ptr<KuduClient> client;
@@ -1067,7 +1069,7 @@ TEST_F(AdminCliTest, TestListTables) {
   FLAGS_num_tablet_servers = 1;
   FLAGS_num_replicas = 1;
 
-  BuildAndStart({}, {});
+  NO_FATALS(BuildAndStart());
 
   string stdout;
   ASSERT_OK(Subprocess::Call({
@@ -1087,7 +1089,7 @@ TEST_F(AdminCliTest, TestListTablesDetail) {
   FLAGS_num_tablet_servers = 3;
   FLAGS_num_replicas = 3;
 
-  BuildAndStart({}, {});
+  NO_FATALS(BuildAndStart());
 
   // Add another table to test multiple tables output.
   const string kAnotherTableId = "TestAnotherTable";