Posted to commits@kudu.apache.org by gr...@apache.org on 2019/08/07 23:37:32 UTC

[kudu] 01/02: [code style] Keep same code style in test files

This is an automated email from the ASF dual-hosted git repository.

granthenke pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git

commit 344f635b7bd47d609502ed64a1aec74e72c88619
Author: honeyhexin <ho...@sohu.com>
AuthorDate: Tue Aug 6 13:29:43 2019 +0800

    [code style] Keep same code style in test files
    
    Change all ASSERT_NO_FATAL_FAILURE to NO_FATALS
    to keep the same code style in test files. In addition,
    delete out-of-date logic in client-test.
    
    Change-Id: I67689fe8113d88f60ac33fa38504813128209da3
    Reviewed-on: http://gerrit.cloudera.org:8080/14016
    Reviewed-by: Adar Dembo <ad...@cloudera.com>
    Tested-by: Kudu Jenkins
---
 src/kudu/cfile/bloomfile-test.cc                   |   6 +-
 src/kudu/cfile/mt-bloomfile-test.cc                |   2 +-
 src/kudu/client/client-test.cc                     | 167 +++++++++------------
 src/kudu/common/wire_protocol-test.cc              |   2 +-
 src/kudu/consensus/consensus_queue-test.cc         |  10 +-
 src/kudu/consensus/leader_election-test.cc         |  52 +++----
 src/kudu/consensus/mt-log-test.cc                  |   6 +-
 src/kudu/consensus/raft_consensus_quorum-test.cc   |  22 +--
 src/kudu/fs/block_manager-test.cc                  |  20 +--
 src/kudu/integration-tests/alter_table-test.cc     |   4 +-
 .../integration-tests/create-table-stress-test.cc  |   6 +-
 src/kudu/integration-tests/linked_list-test.cc     |   8 +-
 .../integration-tests/master_failover-itest.cc     |   2 +-
 src/kudu/integration-tests/registration-test.cc    |   2 +-
 .../update_scan_delta_compact-test.cc              |   8 +-
 src/kudu/master/master-test.cc                     |   8 +-
 src/kudu/rpc/rpc-test.cc                           |  10 +-
 src/kudu/tablet/compaction-test.cc                 |  28 ++--
 src/kudu/tablet/composite-pushdown-test.cc         |  30 ++--
 src/kudu/tablet/major_delta_compaction-test.cc     |  64 ++++----
 src/kudu/tablet/mt-rowset_delta_compaction-test.cc |   4 +-
 src/kudu/tablet/tablet_random_access-test.cc       |   3 +-
 src/kudu/tserver/tablet_server-test.cc             |  58 +++----
 src/kudu/tserver/ts_tablet_manager-test.cc         |   4 +-
 src/kudu/util/env-test.cc                          |  20 +--
 src/kudu/util/trace-test.cc                        |   2 +-
 26 files changed, 261 insertions(+), 287 deletions(-)

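As the commit message notes, this change is purely cosmetic: NO_FATALS is Kudu's
shorthand for gtest's ASSERT_NO_FATAL_FAILURE, so swapping one for the other does
not change test behavior, only line length and style. A minimal sketch of the
relationship, assuming the wrapper is defined as in Kudu's test macro header
(src/kudu/util/test_macros.h):

    // Sketch only; the actual definition lives in src/kudu/util/test_macros.h.
    // NO_FATALS(expr) simply forwards to gtest's ASSERT_NO_FATAL_FAILURE, which
    // aborts the enclosing test function if 'expr' produced any fatal failure.
    #define NO_FATALS(expr) \
      ASSERT_NO_FATAL_FAILURE(expr)

    // So a call site such as
    //   ASSERT_NO_FATAL_FAILURE(WriteTestBloomFile());
    // becomes the equivalent, shorter
    //   NO_FATALS(WriteTestBloomFile());
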
diff --git a/src/kudu/cfile/bloomfile-test.cc b/src/kudu/cfile/bloomfile-test.cc
index 56bb694..10a6e4c 100644
--- a/src/kudu/cfile/bloomfile-test.cc
+++ b/src/kudu/cfile/bloomfile-test.cc
@@ -81,14 +81,14 @@ class BloomFileTest : public BloomFileTestBase {
 
 
 TEST_F(BloomFileTest, TestWriteAndRead) {
-  ASSERT_NO_FATAL_FAILURE(WriteTestBloomFile());
+  NO_FATALS(WriteTestBloomFile());
   ASSERT_OK(OpenBloomFile());
   VerifyBloomFile();
 }
 
 #ifdef NDEBUG
 TEST_F(BloomFileTest, Benchmark) {
-  ASSERT_NO_FATAL_FAILURE(WriteTestBloomFile());
+  NO_FATALS(WriteTestBloomFile());
   ASSERT_OK(OpenBloomFile());
 
   uint64_t count_present = ReadBenchmark();
@@ -108,7 +108,7 @@ TEST_F(BloomFileTest, Benchmark) {
 #endif
 
 TEST_F(BloomFileTest, TestLazyInit) {
-  ASSERT_NO_FATAL_FAILURE(WriteTestBloomFile());
+  NO_FATALS(WriteTestBloomFile());
 
   shared_ptr<MemTracker> tracker = MemTracker::CreateTracker(-1, "test");
   int64_t initial_mem_usage = tracker->consumption();
diff --git a/src/kudu/cfile/mt-bloomfile-test.cc b/src/kudu/cfile/mt-bloomfile-test.cc
index 76aada1..940be0f 100644
--- a/src/kudu/cfile/mt-bloomfile-test.cc
+++ b/src/kudu/cfile/mt-bloomfile-test.cc
@@ -40,7 +40,7 @@ class MTBloomFileTest : public BloomFileTestBase {
 };
 
 TEST_F(MTBloomFileTest, Benchmark) {
-  ASSERT_NO_FATAL_FAILURE(WriteTestBloomFile());
+  NO_FATALS(WriteTestBloomFile());
   ASSERT_OK(OpenBloomFile());
 
   std::vector<scoped_refptr<kudu::Thread> > threads;
diff --git a/src/kudu/client/client-test.cc b/src/kudu/client/client-test.cc
index 290380e..ac7b6c1 100644
--- a/src/kudu/client/client-test.cc
+++ b/src/kudu/client/client-test.cc
@@ -210,7 +210,7 @@ class ClientTest : public KuduTest {
         .add_master_server_addr(cluster_->mini_master()->bound_rpc_addr().ToString())
         .Build(&client_));
 
-    ASSERT_NO_FATAL_FAILURE(CreateTable(kTableName, 1, GenerateSplitRows(), {}, &client_table_));
+    NO_FATALS(CreateTable(kTableName, 1, GenerateSplitRows(), {}, &client_table_));
   }
 
   void TearDown() override {
@@ -356,10 +356,9 @@ class ClientTest : public KuduTest {
     shared_ptr<KuduSession> session = client->NewSession();
     ASSERT_OK(session->SetFlushMode(KuduSession::AUTO_FLUSH_BACKGROUND));
     session->SetTimeoutMillis(60000);
-    ASSERT_NO_FATAL_FAILURE(InsertTestRows(table, session.get(),
-                                           num_rows, first_row));
+    NO_FATALS(InsertTestRows(table, session.get(), num_rows, first_row));
     FlushSessionOrDie(session);
-    ASSERT_NO_FATAL_FAILURE(CheckNoRpcOverflow());
+    NO_FATALS(CheckNoRpcOverflow());
   }
 
   // Inserts 'num_rows' using the default client.
@@ -376,7 +375,7 @@ class ClientTest : public KuduTest {
       ASSERT_OK(session->Apply(update.release()));
     }
     FlushSessionOrDie(session);
-    ASSERT_NO_FATAL_FAILURE(CheckNoRpcOverflow());
+    NO_FATALS(CheckNoRpcOverflow());
   }
 
   void DeleteTestRows(KuduTable* table, int lo, int hi) {
@@ -388,7 +387,7 @@ class ClientTest : public KuduTest {
       ASSERT_OK(session->Apply(del.release()));
     }
     FlushSessionOrDie(session);
-    ASSERT_NO_FATAL_FAILURE(CheckNoRpcOverflow());
+    NO_FATALS(CheckNoRpcOverflow());
   }
 
   unique_ptr<KuduInsert> BuildTestRow(KuduTable* table, int index) {
@@ -853,8 +852,7 @@ TEST_F(ClientTest, TestRandomizedLimitScans) {
   FLAGS_flush_threshold_secs = 1;
 
   FLAGS_scanner_batch_size_rows = batch_size;
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(
-      client_table_.get(), num_rows));
+  NO_FATALS(InsertTestRows(client_table_.get(), num_rows));
   SleepFor(MonoDelta::FromSeconds(1));
   LOG(INFO) << Substitute("Total number of rows: $0, batch size: $1", num_rows, batch_size);
 
@@ -892,8 +890,7 @@ TEST_F(ClientTest, TestRandomizedLimitScans) {
 }
 
 TEST_F(ClientTest, TestScan) {
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(
-      client_table_.get(), FLAGS_test_scan_num_rows));
+  NO_FATALS(InsertTestRows(client_table_.get(), FLAGS_test_scan_num_rows));
 
   ASSERT_EQ(FLAGS_test_scan_num_rows, CountRowsFromClient(client_table_.get()));
 
@@ -925,8 +922,7 @@ TEST_F(ClientTest, TestScanAtSnapshot) {
   int half_the_rows = FLAGS_test_scan_num_rows / 2;
 
   // Insert half the rows
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(client_table_.get(),
-                                         half_the_rows));
+  NO_FATALS(InsertTestRows(client_table_.get(), half_the_rows));
 
   // Get the time from the server and transform to micros, disregarding any
   // logical values (we shouldn't have any with a single server anyway).
@@ -934,8 +930,7 @@ TEST_F(ClientTest, TestScanAtSnapshot) {
       cluster_->mini_tablet_server(0)->server()->clock()->Now());
 
   // Insert the second half of the rows
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(client_table_.get(),
-                                         half_the_rows, half_the_rows));
+  NO_FATALS(InsertTestRows(client_table_.get(), half_the_rows, half_the_rows));
 
   KuduScanner scanner(client_table_.get());
   ASSERT_OK(scanner.Open());
@@ -1014,8 +1009,8 @@ TEST_P(ScanMultiTabletParamTest, Test) {
       ASSERT_OK(row->SetInt32(0, i * kRowsPerTablet));
       rows.emplace_back(std::move(row));
     }
-    ASSERT_NO_FATAL_FAILURE(CreateTable("TestScanMultiTablet", 1,
-                                        std::move(rows), {}, &table));
+    NO_FATALS(CreateTable("TestScanMultiTablet", 1,
+                          std::move(rows), {}, &table));
   }
 
   // Insert rows with keys 12, 13, 15, 17, 22, 23, 25, 27...47 into each
@@ -1142,8 +1137,7 @@ TEST_F(ClientTest, TestScanEmptyTable) {
 // row block with the proper number of rows filled in. Impala issues
 // scans like this in order to implement COUNT(*).
 TEST_F(ClientTest, TestScanEmptyProjection) {
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(client_table_.get(),
-                                         FLAGS_test_scan_num_rows));
+  NO_FATALS(InsertTestRows(client_table_.get(), FLAGS_test_scan_num_rows));
   KuduScanner scanner(client_table_.get());
   ASSERT_OK(scanner.SetProjectedColumnNames({}));
   ASSERT_EQ(scanner.GetProjectionSchema().num_columns(), 0);
@@ -1177,8 +1171,7 @@ TEST_F(ClientTest, TestProjectInvalidColumn) {
 // Test a scan where we have a predicate on a key column that is not
 // in the projection.
 TEST_F(ClientTest, TestScanPredicateKeyColNotProjected) {
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(client_table_.get(),
-                                         FLAGS_test_scan_num_rows));
+  NO_FATALS(InsertTestRows(client_table_.get(), FLAGS_test_scan_num_rows));
   KuduScanner scanner(client_table_.get());
   ASSERT_OK(scanner.SetProjectedColumnNames({ "int_val" }));
   ASSERT_EQ(scanner.GetProjectionSchema().num_columns(), 1);
@@ -1214,8 +1207,7 @@ TEST_F(ClientTest, TestScanPredicateKeyColNotProjected) {
 // Test a scan where we have a predicate on a non-key column that is
 // not in the projection.
 TEST_F(ClientTest, TestScanPredicateNonKeyColNotProjected) {
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(client_table_.get(),
-                                         FLAGS_test_scan_num_rows));
+  NO_FATALS(InsertTestRows(client_table_.get(), FLAGS_test_scan_num_rows));
   KuduScanner scanner(client_table_.get());
   ASSERT_OK(scanner.AddConjunctPredicate(
                 client_table_->NewComparisonPredicate("int_val", KuduPredicate::GREATER_EQUAL,
@@ -1286,7 +1278,7 @@ TEST_F(ClientTest, TestInvalidPredicates) {
 TEST_F(ClientTest, TestScanCloseProxy) {
   const string kEmptyTable = "TestScanCloseProxy";
   shared_ptr<KuduTable> table;
-  ASSERT_NO_FATAL_FAILURE(CreateTable(kEmptyTable, 3, GenerateSplitRows(), {}, &table));
+  NO_FATALS(CreateTable(kEmptyTable, 3, GenerateSplitRows(), {}, &table));
 
   {
     // Open and close an empty scanner.
@@ -1297,8 +1289,7 @@ TEST_F(ClientTest, TestScanCloseProxy) {
   }
 
   // Insert some test rows.
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(table.get(),
-                                         FLAGS_test_scan_num_rows));
+  NO_FATALS(InsertTestRows(table.get(), FLAGS_test_scan_num_rows));
   {
     // Open and close a scanner with rows.
     KuduScanner scanner(table.get());
@@ -1329,8 +1320,7 @@ TEST_F(ClientTest, TestRowPtrNoRedaction) {
 
 TEST_F(ClientTest, TestScanYourWrites) {
   // Insert the rows
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(client_table_.get(),
-                                         FLAGS_test_scan_num_rows));
+  NO_FATALS(InsertTestRows(client_table_.get(), FLAGS_test_scan_num_rows));
 
   // Verify that no matter which replica is selected, client could
   // achieve read-your-writes/read-your-reads.
@@ -1428,8 +1418,8 @@ TEST_F(ClientTest, TestScanFaultTolerance) {
   FLAGS_leader_failure_exp_backoff_max_delta_ms = 1000;
 
   const int kNumReplicas = 3;
-  ASSERT_NO_FATAL_FAILURE(CreateTable(kScanTable, kNumReplicas, {}, {}, &table));
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(table.get(), FLAGS_test_scan_num_rows));
+  NO_FATALS(CreateTable(kScanTable, kNumReplicas, {}, {}, &table));
+  NO_FATALS(InsertTestRows(table.get(), FLAGS_test_scan_num_rows));
 
   // Do an initial scan to determine the expected rows for later verification.
   vector<string> expected_rows;
@@ -1455,14 +1445,14 @@ TEST_F(ClientTest, TestScanFaultTolerance) {
 
       // Restarting and waiting should result in a SCANNER_EXPIRED error.
       LOG(INFO) << "Doing a scan while restarting a tserver and waiting for it to come up...";
-      ASSERT_NO_FATAL_FAILURE(internal::DoScanWithCallback(table.get(), expected_rows, limit,
+      NO_FATALS(internal::DoScanWithCallback(table.get(), expected_rows, limit,
           boost::bind(&ClientTest_TestScanFaultTolerance_Test::RestartTServerAndWait,
                       this, _1)));
 
       // Restarting and not waiting means the tserver is hopefully bootstrapping, leading to
       // a TABLET_NOT_RUNNING error.
       LOG(INFO) << "Doing a scan while restarting a tserver...";
-      ASSERT_NO_FATAL_FAILURE(internal::DoScanWithCallback(table.get(), expected_rows, limit,
+      NO_FATALS(internal::DoScanWithCallback(table.get(), expected_rows, limit,
           boost::bind(&ClientTest_TestScanFaultTolerance_Test::RestartTServerAsync,
                       this, _1)));
       for (int i = 0; i < cluster_->num_tablet_servers(); i++) {
@@ -1472,7 +1462,7 @@ TEST_F(ClientTest, TestScanFaultTolerance) {
 
       // Killing the tserver should lead to an RPC timeout.
       LOG(INFO) << "Doing a scan while killing a tserver...";
-      ASSERT_NO_FATAL_FAILURE(internal::DoScanWithCallback(table.get(), expected_rows, limit,
+      NO_FATALS(internal::DoScanWithCallback(table.get(), expected_rows, limit,
           boost::bind(&ClientTest_TestScanFaultTolerance_Test::KillTServer,
                       this, _1)));
 
@@ -1497,8 +1487,8 @@ TEST_F(ClientTest, TestNonFaultTolerantScannerExpired) {
   shared_ptr<KuduTable> table;
 
   const int kNumReplicas = 1;
-  ASSERT_NO_FATAL_FAILURE(CreateTable(kScanTable, kNumReplicas, {}, {}, &table));
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(table.get(), FLAGS_test_scan_num_rows));
+  NO_FATALS(CreateTable(kScanTable, kNumReplicas, {}, {}, &table));
+  NO_FATALS(InsertTestRows(table.get(), FLAGS_test_scan_num_rows));
 
   KuduScanner scanner(table.get());
   ASSERT_OK(scanner.SetTimeoutMillis(30 * 1000));
@@ -1560,11 +1550,11 @@ TEST_F(ClientTest, TestNonCoveringRangePartitions) {
   // Aggresively clear the meta cache between insert batches so that the meta
   // cache will execute GetTableLocation RPCs at different partition keys.
 
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(table.get(), 50, 0));
+  NO_FATALS(InsertTestRows(table.get(), 50, 0));
   client_->data_->meta_cache_->ClearCache();
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(table.get(), 50, 50));
+  NO_FATALS(InsertTestRows(table.get(), 50, 50));
   client_->data_->meta_cache_->ClearCache();
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(table.get(), 100, 200));
+  NO_FATALS(InsertTestRows(table.get(), 100, 200));
   client_->data_->meta_cache_->ClearCache();
 
   // Insert out-of-range rows.
@@ -1770,8 +1760,8 @@ TEST_F(ClientTest, TestExclusiveInclusiveRangeBounds) {
   ASSERT_OK(alterer->Alter());
   ASSERT_OK(client_->OpenTable(table_name, &table));
 
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(table.get(), 100, 0));
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(table.get(), 100, 200));
+  NO_FATALS(InsertTestRows(table.get(), 100, 0));
+  NO_FATALS(InsertTestRows(table.get(), 100, 200));
 
   // Insert out-of-range rows.
   shared_ptr<KuduSession> session = client_->NewSession();
@@ -1981,11 +1971,11 @@ TEST_F(ClientTest, TestMetaCacheExpiry) {
 
 TEST_F(ClientTest, TestGetTabletServerBlacklist) {
   shared_ptr<KuduTable> table;
-  ASSERT_NO_FATAL_FAILURE(CreateTable("blacklist",
-                                      3,
-                                      GenerateSplitRows(),
-                                      {},
-                                      &table));
+  NO_FATALS(CreateTable("blacklist",
+                        3,
+                        GenerateSplitRows(),
+                        {},
+                        &table));
   InsertTestRows(table.get(), 1, 0);
 
   // Look up the tablet and its replicas into the metadata cache.
@@ -2058,16 +2048,16 @@ TEST_F(ClientTest, TestGetTabletServerBlacklist) {
 
 TEST_F(ClientTest, TestScanWithEncodedRangePredicate) {
   shared_ptr<KuduTable> table;
-  ASSERT_NO_FATAL_FAILURE(CreateTable("split-table",
-                                      1, /* replicas */
-                                      GenerateSplitRows(),
-                                      {},
-                                      &table));
+  NO_FATALS(CreateTable("split-table",
+                        1, /* replicas */
+                        GenerateSplitRows(),
+                        {},
+                        &table));
 
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(table.get(), 100));
+  NO_FATALS(InsertTestRows(table.get(), 100));
 
   vector<string> all_rows;
-  ASSERT_NO_FATAL_FAILURE(ScanTableToStrings(table.get(), &all_rows));
+  NO_FATALS(ScanTableToStrings(table.get(), &all_rows));
   ASSERT_EQ(100, all_rows.size());
 
   unique_ptr<KuduPartialRow> row(table->schema().NewRow());
@@ -2182,7 +2172,7 @@ int64_t SumResults(const KuduScanBatch& batch) {
 } // anonymous namespace
 
 TEST_F(ClientTest, TestScannerKeepAlive) {
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(client_table_.get(), 1000));
+  NO_FATALS(InsertTestRows(client_table_.get(), 1000));
   // Set the scanner ttl really low
   FLAGS_scanner_ttl_ms = 100; // 100 milliseconds
   // Start a scan but don't get the whole data back
@@ -2249,7 +2239,7 @@ TEST_F(ClientTest, TestScannerKeepAlive) {
 
 // Test cleanup of scanners on the server side when closed.
 TEST_F(ClientTest, TestCloseScanner) {
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(client_table_.get(), 10));
+  NO_FATALS(InsertTestRows(client_table_.get(), 10));
 
   const tserver::ScannerManager* manager =
     cluster_->mini_tablet_server(0)->server()->scanner_manager();
@@ -2301,7 +2291,7 @@ TEST_F(ClientTest, TestScanTimeout) {
 
   // Warm the cache so that the subsequent timeout occurs within the scan,
   // not the lookup.
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(client_table_.get(), 1));
+  NO_FATALS(InsertTestRows(client_table_.get(), 1));
 
   // The "overall operation" timed out; no replicas failed.
   {
@@ -2314,7 +2304,7 @@ TEST_F(ClientTest, TestScanTimeout) {
 
   // Insert some more rows so that the scan takes multiple batches, instead of
   // fetching all the data on the 'Open()' call.
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(client_table_.get(), 1000, 1));
+  NO_FATALS(InsertTestRows(client_table_.get(), 1000, 1));
   {
     google::FlagSaver saver;
     FLAGS_scanner_max_batch_size_bytes = 100;
@@ -4194,17 +4184,17 @@ TEST_F(ClientTest, TestReplicatedMultiTabletTable) {
   const int kNumReplicas = 3;
 
   shared_ptr<KuduTable> table;
-  ASSERT_NO_FATAL_FAILURE(CreateTable(kReplicatedTable,
-                                      kNumReplicas,
-                                      GenerateSplitRows(),
-                                      {},
-                                      &table));
+  NO_FATALS(CreateTable(kReplicatedTable,
+                        kNumReplicas,
+                        GenerateSplitRows(),
+                        {},
+                        &table));
 
   // Should have no rows to begin with.
   ASSERT_EQ(0, CountRowsFromClient(table.get()));
 
   // Insert some data.
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(table.get(), kNumRowsToWrite));
+  NO_FATALS(InsertTestRows(table.get(), kNumRowsToWrite));
 
   // Should now see the data.
   ASSERT_EQ(kNumRowsToWrite, CountRowsFromClient(table.get()));
@@ -4220,14 +4210,14 @@ TEST_F(ClientTest, TestReplicatedMultiTabletTableFailover) {
   const int kNumTries = 100;
 
   shared_ptr<KuduTable> table;
-  ASSERT_NO_FATAL_FAILURE(CreateTable(kReplicatedTable,
-                                      kNumReplicas,
-                                      GenerateSplitRows(),
-                                      {},
-                                      &table));
+  NO_FATALS(CreateTable(kReplicatedTable,
+                        kNumReplicas,
+                        GenerateSplitRows(),
+                        {},
+                        &table));
 
   // Insert some data.
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(table.get(), kNumRowsToWrite));
+  NO_FATALS(InsertTestRows(table.get(), kNumRowsToWrite));
 
   // Find the leader of the first tablet.
   scoped_refptr<internal::RemoteTablet> rt = MetaCacheLookup(table.get(), "");
@@ -4269,24 +4259,16 @@ TEST_F(ClientTest, TestReplicatedMultiTabletTableFailover) {
 
 // This test that we can keep writing to a tablet when the leader
 // tablet dies.
-// This currently forces leader promotion through RPC and creates
-// a new client afterwards.
 TEST_F(ClientTest, TestReplicatedTabletWritesWithLeaderElection) {
   const string kReplicatedTable = "replicated_failover_on_writes";
   const int kNumRowsToWrite = 100;
   const int kNumReplicas = 3;
 
   shared_ptr<KuduTable> table;
-  ASSERT_NO_FATAL_FAILURE(CreateTable(kReplicatedTable, kNumReplicas, {}, {}, &table));
+  NO_FATALS(CreateTable(kReplicatedTable, kNumReplicas, {}, {}, &table));
 
   // Insert some data.
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(table.get(), kNumRowsToWrite));
-
-  // TODO: we have to sleep here to make sure that the leader has time to
-  // propagate the writes to the followers. We can remove this once the
-  // followers run a leader election on their own and handle advancing
-  // the commit index.
-  SleepFor(MonoDelta::FromMilliseconds(1500));
+  NO_FATALS(InsertTestRows(table.get(), kNumRowsToWrite));
 
   // Find the leader replica
   scoped_refptr<internal::RemoteTablet> rt = MetaCacheLookup(table.get(), "");
@@ -4305,16 +4287,10 @@ TEST_F(ClientTest, TestReplicatedTabletWritesWithLeaderElection) {
   ASSERT_OK(KillTServer(killed_uuid));
 
   LOG(INFO) << "Inserting additional rows...";
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(client_.get(),
-                                         table.get(),
-                                         kNumRowsToWrite,
-                                         kNumRowsToWrite));
-
-  // TODO: we have to sleep here to make sure that the leader has time to
-  // propagate the writes to the followers. We can remove this once the
-  // followers run a leader election on their own and handle advancing
-  // the commit index.
-  SleepFor(MonoDelta::FromMilliseconds(1500));
+  NO_FATALS(InsertTestRows(client_.get(),
+                           table.get(),
+                           kNumRowsToWrite,
+                           kNumRowsToWrite));
 
   LOG(INFO) << "Counting rows...";
   ASSERT_EQ(2 * kNumRowsToWrite, CountRowsFromClient(table.get(),
@@ -4383,7 +4359,7 @@ TEST_F(ClientTest, TestRandomWriteOperation) {
     if (i % 50 == 0) {
       LOG(INFO) << "Correctness test " << i;
       FlushSessionOrDie(session);
-      ASSERT_NO_FATAL_FAILURE(CheckCorrectness(&scanner, row, nrows));
+      NO_FATALS(CheckCorrectness(&scanner, row, nrows));
       LOG(INFO) << "...complete";
     }
 
@@ -4420,7 +4396,7 @@ TEST_F(ClientTest, TestRandomWriteOperation) {
 
   // And one more time for the last batch.
   FlushSessionOrDie(session);
-  ASSERT_NO_FATAL_FAILURE(CheckCorrectness(&scanner, row, nrows));
+  NO_FATALS(CheckCorrectness(&scanner, row, nrows));
 }
 
 // Test whether a batch can handle several mutations in a batch
@@ -4485,8 +4461,7 @@ TEST_F(ClientTest, TestSeveralRowMutatesPerBatch) {
 // rows are inserted.
 TEST_F(ClientTest, TestMasterLookupPermits) {
   int initial_value = client_->data_->meta_cache_->master_lookup_sem_.GetValue();
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(client_table_.get(),
-                                         FLAGS_test_scan_num_rows));
+  NO_FATALS(InsertTestRows(client_table_.get(), FLAGS_test_scan_num_rows));
   ASSERT_EQ(initial_value,
             client_->data_->meta_cache_->master_lookup_sem_.GetValue());
 }
@@ -4950,7 +4925,7 @@ TEST_P(LatestObservedTimestampParamTest, Test) {
   // Check that a write updates the latest observed timestamp.
   const uint64_t ts0 = client_->GetLatestObservedTimestamp();
   ASSERT_EQ(KuduClient::kNoTimestamp, ts0);
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(client_table_.get(), 1, 0));
+  NO_FATALS(InsertTestRows(client_table_.get(), 1, 0));
   const uint64_t ts1 = client_->GetLatestObservedTimestamp();
   ASSERT_NE(ts0, ts1);
 
@@ -5067,8 +5042,7 @@ TEST_F(ClientTest, TestReadAtSnapshotNoTimestampSet) {
       CHECK_OK(row->SetInt32(0, i * kRowsPerTablet));
       rows.push_back(std::move(row));
     }
-    ASSERT_NO_FATAL_FAILURE(CreateTable("test_table", 1,
-                                        std::move(rows), {}, &table));
+    NO_FATALS(CreateTable("test_table", 1, std::move(rows), {}, &table));
     // Insert some data into the table, so each tablet would get populated.
     shared_ptr<KuduSession> session(client_->NewSession());
     ASSERT_OK(session->SetFlushMode(KuduSession::MANUAL_FLUSH));
@@ -5435,8 +5409,7 @@ INSTANTIATE_TEST_CASE_P(BinaryColEncodings,
                         ::testing::Values(kPlainBin, kPrefix, kDictionary));
 
 TEST_F(ClientTest, TestClonePredicates) {
-  ASSERT_NO_FATAL_FAILURE(InsertTestRows(client_table_.get(),
-                                         2, 0));
+  NO_FATALS(InsertTestRows(client_table_.get(), 2, 0));
   unique_ptr<KuduPredicate> predicate(client_table_->NewComparisonPredicate(
       "key",
       KuduPredicate::EQUAL,
@@ -5593,7 +5566,7 @@ TEST_F(ClientTest, TestBatchScanConstIterator) {
   {
     // Insert a few rows
     const int kRowNum = 2;
-    ASSERT_NO_FATAL_FAILURE(InsertTestRows(client_table_.get(), kRowNum));
+    NO_FATALS(InsertTestRows(client_table_.get(), kRowNum));
 
     KuduScanner scanner(client_table_.get());
     ASSERT_OK(scanner.Open());
diff --git a/src/kudu/common/wire_protocol-test.cc b/src/kudu/common/wire_protocol-test.cc
index 0317cab..49dc500 100644
--- a/src/kudu/common/wire_protocol-test.cc
+++ b/src/kudu/common/wire_protocol-test.cc
@@ -579,7 +579,7 @@ TEST_F(WireProtocolTest, TestColumnPredicateInList) {
 
     kudu::ColumnPredicate cp = kudu::ColumnPredicate::InList(col1, &values);
     ColumnPredicatePB pb;
-    ASSERT_NO_FATAL_FAILURE(ColumnPredicateToPB(cp, &pb));
+    NO_FATALS(ColumnPredicateToPB(cp, &pb));
 
     ASSERT_OK(ColumnPredicateFromPB(schema, &arena, pb, &predicate));
     ASSERT_EQ(predicate->predicate_type(), PredicateType::InList);
diff --git a/src/kudu/consensus/consensus_queue-test.cc b/src/kudu/consensus/consensus_queue-test.cc
index 840816c..807681b 100644
--- a/src/kudu/consensus/consensus_queue-test.cc
+++ b/src/kudu/consensus/consensus_queue-test.cc
@@ -590,11 +590,11 @@ TEST_F(ConsensusQueueTest, TestQueueLoadsOperationsForPeer) {
 
   // Now we start tracking the peer, this negotiation round should let
   // the queue know how far along the peer is.
-  ASSERT_NO_FATAL_FAILURE(UpdatePeerWatermarkToOp(&request,
-                                                  &response,
-                                                  peers_last_op,
-                                                  MinimumOpId(),
-                                                  &send_more_immediately));
+  NO_FATALS(UpdatePeerWatermarkToOp(&request,
+                                    &response,
+                                    peers_last_op,
+                                    MinimumOpId(),
+                                    &send_more_immediately));
 
   // The queue should reply that there are more messages for the peer.
   ASSERT_TRUE(send_more_immediately);
diff --git a/src/kudu/consensus/leader_election-test.cc b/src/kudu/consensus/leader_election-test.cc
index f0f7b47..245fb47 100644
--- a/src/kudu/consensus/leader_election-test.cc
+++ b/src/kudu/consensus/leader_election-test.cc
@@ -495,16 +495,16 @@ TEST_F(VoteCounterTest, TestVoteCounter_EarlyDecision) {
   {
     // Start off undecided.
     VoteCounter counter(kNumVoters, kMajoritySize);
-    ASSERT_NO_FATAL_FAILURE(AssertUndecided(counter));
-    ASSERT_NO_FATAL_FAILURE(AssertVoteCount(counter, 0, 0));
+    NO_FATALS(AssertUndecided(counter));
+    NO_FATALS(AssertVoteCount(counter, 0, 0));
     ASSERT_FALSE(counter.AreAllVotesIn());
 
     // First yes vote.
     bool duplicate;
     ASSERT_OK(counter.RegisterVote(voter_uuids[0], VOTE_GRANTED, &duplicate));
     ASSERT_FALSE(duplicate);
-    ASSERT_NO_FATAL_FAILURE(AssertUndecided(counter));
-    ASSERT_NO_FATAL_FAILURE(AssertVoteCount(counter, 1, 0));
+    NO_FATALS(AssertUndecided(counter));
+    NO_FATALS(AssertVoteCount(counter, 1, 0));
     ASSERT_FALSE(counter.AreAllVotesIn());
 
     // Second yes vote wins it in a configuration of 3.
@@ -514,7 +514,7 @@ TEST_F(VoteCounterTest, TestVoteCounter_EarlyDecision) {
     ElectionVote decision;
     ASSERT_OK(counter.GetDecision(&decision));
     ASSERT_TRUE(decision == VOTE_GRANTED);
-    ASSERT_NO_FATAL_FAILURE(AssertVoteCount(counter, 2, 0));
+    NO_FATALS(AssertVoteCount(counter, 2, 0));
     ASSERT_FALSE(counter.AreAllVotesIn());
   }
 
@@ -522,16 +522,16 @@ TEST_F(VoteCounterTest, TestVoteCounter_EarlyDecision) {
   {
     // Start off undecided.
     VoteCounter counter(kNumVoters, kMajoritySize);
-    ASSERT_NO_FATAL_FAILURE(AssertUndecided(counter));
-    ASSERT_NO_FATAL_FAILURE(AssertVoteCount(counter, 0, 0));
+    NO_FATALS(AssertUndecided(counter));
+    NO_FATALS(AssertVoteCount(counter, 0, 0));
     ASSERT_FALSE(counter.AreAllVotesIn());
 
     // First no vote.
     bool duplicate;
     ASSERT_OK(counter.RegisterVote(voter_uuids[0], VOTE_DENIED, &duplicate));
     ASSERT_FALSE(duplicate);
-    ASSERT_NO_FATAL_FAILURE(AssertUndecided(counter));
-    ASSERT_NO_FATAL_FAILURE(AssertVoteCount(counter, 0, 1));
+    NO_FATALS(AssertUndecided(counter));
+    NO_FATALS(AssertVoteCount(counter, 0, 1));
     ASSERT_FALSE(counter.AreAllVotesIn());
 
     // Second no vote loses it in a configuration of 3.
@@ -541,7 +541,7 @@ TEST_F(VoteCounterTest, TestVoteCounter_EarlyDecision) {
     ElectionVote decision;
     ASSERT_OK(counter.GetDecision(&decision));
     ASSERT_TRUE(decision == VOTE_DENIED);
-    ASSERT_NO_FATAL_FAILURE(AssertVoteCount(counter, 0, 2));
+    NO_FATALS(AssertVoteCount(counter, 0, 2));
     ASSERT_FALSE(counter.AreAllVotesIn());
   }
 }
@@ -554,23 +554,23 @@ TEST_F(VoteCounterTest, TestVoteCounter_LateDecision) {
 
   // Start off undecided.
   VoteCounter counter(kNumVoters, kMajoritySize);
-  ASSERT_NO_FATAL_FAILURE(AssertUndecided(counter));
-  ASSERT_NO_FATAL_FAILURE(AssertVoteCount(counter, 0, 0));
+  NO_FATALS(AssertUndecided(counter));
+  NO_FATALS(AssertVoteCount(counter, 0, 0));
   ASSERT_FALSE(counter.AreAllVotesIn());
 
   // Add single yes vote, still undecided.
   bool duplicate;
   ASSERT_OK(counter.RegisterVote(voter_uuids[0], VOTE_GRANTED, &duplicate));
   ASSERT_FALSE(duplicate);
-  ASSERT_NO_FATAL_FAILURE(AssertUndecided(counter));
-  ASSERT_NO_FATAL_FAILURE(AssertVoteCount(counter, 1, 0));
+  NO_FATALS(AssertUndecided(counter));
+  NO_FATALS(AssertVoteCount(counter, 1, 0));
   ASSERT_FALSE(counter.AreAllVotesIn());
 
   // Attempt duplicate vote.
   ASSERT_OK(counter.RegisterVote(voter_uuids[0], VOTE_GRANTED, &duplicate));
   ASSERT_TRUE(duplicate);
-  ASSERT_NO_FATAL_FAILURE(AssertUndecided(counter));
-  ASSERT_NO_FATAL_FAILURE(AssertVoteCount(counter, 1, 0));
+  NO_FATALS(AssertUndecided(counter));
+  NO_FATALS(AssertVoteCount(counter, 1, 0));
   ASSERT_FALSE(counter.AreAllVotesIn());
 
   // Attempt to change vote.
@@ -578,27 +578,27 @@ TEST_F(VoteCounterTest, TestVoteCounter_LateDecision) {
   ASSERT_TRUE(s.IsInvalidArgument());
   ASSERT_STR_CONTAINS(s.ToString(), "voted a different way twice");
   LOG(INFO) << "Expected vote-changed error: " << s.ToString();
-  ASSERT_NO_FATAL_FAILURE(AssertUndecided(counter));
-  ASSERT_NO_FATAL_FAILURE(AssertVoteCount(counter, 1, 0));
+  NO_FATALS(AssertUndecided(counter));
+  NO_FATALS(AssertVoteCount(counter, 1, 0));
   ASSERT_FALSE(counter.AreAllVotesIn());
 
   // Add more votes...
   ASSERT_OK(counter.RegisterVote(voter_uuids[1], VOTE_DENIED, &duplicate));
   ASSERT_FALSE(duplicate);
-  ASSERT_NO_FATAL_FAILURE(AssertUndecided(counter));
-  ASSERT_NO_FATAL_FAILURE(AssertVoteCount(counter, 1, 1));
+  NO_FATALS(AssertUndecided(counter));
+  NO_FATALS(AssertVoteCount(counter, 1, 1));
   ASSERT_FALSE(counter.AreAllVotesIn());
 
   ASSERT_OK(counter.RegisterVote(voter_uuids[2], VOTE_GRANTED, &duplicate));
   ASSERT_FALSE(duplicate);
-  ASSERT_NO_FATAL_FAILURE(AssertUndecided(counter));
-  ASSERT_NO_FATAL_FAILURE(AssertVoteCount(counter, 2, 1));
+  NO_FATALS(AssertUndecided(counter));
+  NO_FATALS(AssertVoteCount(counter, 2, 1));
   ASSERT_FALSE(counter.AreAllVotesIn());
 
   ASSERT_OK(counter.RegisterVote(voter_uuids[3], VOTE_DENIED, &duplicate));
   ASSERT_FALSE(duplicate);
-  ASSERT_NO_FATAL_FAILURE(AssertUndecided(counter));
-  ASSERT_NO_FATAL_FAILURE(AssertVoteCount(counter, 2, 2));
+  NO_FATALS(AssertUndecided(counter));
+  NO_FATALS(AssertVoteCount(counter, 2, 2));
   ASSERT_FALSE(counter.AreAllVotesIn());
 
   // Win the election.
@@ -608,7 +608,7 @@ TEST_F(VoteCounterTest, TestVoteCounter_LateDecision) {
   ElectionVote decision;
   ASSERT_OK(counter.GetDecision(&decision));
   ASSERT_TRUE(decision == VOTE_GRANTED);
-  ASSERT_NO_FATAL_FAILURE(AssertVoteCount(counter, 3, 2));
+  NO_FATALS(AssertVoteCount(counter, 3, 2));
   ASSERT_TRUE(counter.AreAllVotesIn());
 
   // Attempt to vote with > the whole configuration.
@@ -617,7 +617,7 @@ TEST_F(VoteCounterTest, TestVoteCounter_LateDecision) {
   ASSERT_STR_CONTAINS(s.ToString(), "cause the number of votes to exceed the expected number");
   LOG(INFO) << "Expected voters-exceeded error: " << s.ToString();
   ASSERT_TRUE(counter.IsDecided());
-  ASSERT_NO_FATAL_FAILURE(AssertVoteCount(counter, 3, 2));
+  NO_FATALS(AssertVoteCount(counter, 3, 2));
   ASSERT_TRUE(counter.AreAllVotesIn());
 }
 
diff --git a/src/kudu/consensus/mt-log-test.cc b/src/kudu/consensus/mt-log-test.cc
index fb142cd..d4ced17 100644
--- a/src/kudu/consensus/mt-log-test.cc
+++ b/src/kudu/consensus/mt-log-test.cc
@@ -236,11 +236,11 @@ TEST_F(MultiThreadedLogTest, TestAppends) {
                                        FLAGS_num_writer_threads * FLAGS_num_batches_per_thread,
                                        FLAGS_num_writer_threads,
                                        FLAGS_num_batches_per_thread)) {
-    ASSERT_NO_FATAL_FAILURE(Run());
+    NO_FATALS(Run());
   }
   ASSERT_OK(log_->Close());
   if (FLAGS_verify_log) {
-    ASSERT_NO_FATAL_FAILURE(VerifyLog());
+    NO_FATALS(VerifyLog());
   }
 }
 
@@ -255,7 +255,7 @@ TEST_F(MultiThreadedLogTest, TestAppendThreadStartStopRaces) {
   ASSERT_OK(BuildLog());
   LogWriterThread(1);
   ASSERT_OK(log_->Close());
-  ASSERT_NO_FATAL_FAILURE(VerifyLog());
+  NO_FATALS(VerifyLog());
 }
 
 } // namespace log
diff --git a/src/kudu/consensus/raft_consensus_quorum-test.cc b/src/kudu/consensus/raft_consensus_quorum-test.cc
index a6dbab0..75edcab 100644
--- a/src/kudu/consensus/raft_consensus_quorum-test.cc
+++ b/src/kudu/consensus/raft_consensus_quorum-test.cc
@@ -1045,8 +1045,8 @@ TEST_F(RaftConsensusQuorumTest, TestRequestVote) {
                               &response));
   ASSERT_TRUE(response.vote_granted());
   ASSERT_EQ(last_op_id.term() + 1, response.responder_term());
-  ASSERT_NO_FATAL_FAILURE(AssertDurableTermAndVote(kPeerIndex, last_op_id.term() + 1,
-                                                   fs_managers_[0]->uuid()));
+  NO_FATALS(AssertDurableTermAndVote(kPeerIndex, last_op_id.term() + 1,
+                                     fs_managers_[0]->uuid()));
   ASSERT_EQ(1, flush_count() - flush_count_before)
       << "A granted vote should flush only once";
 
@@ -1071,8 +1071,8 @@ TEST_F(RaftConsensusQuorumTest, TestRequestVote) {
   ASSERT_TRUE(response.has_consensus_error());
   ASSERT_EQ(ConsensusErrorPB::ALREADY_VOTED, response.consensus_error().code());
   ASSERT_EQ(last_op_id.term() + 1, response.responder_term());
-  ASSERT_NO_FATAL_FAILURE(AssertDurableTermAndVote(kPeerIndex, last_op_id.term() + 1,
-                                                   fs_managers_[0]->uuid()));
+  NO_FATALS(AssertDurableTermAndVote(kPeerIndex, last_op_id.term() + 1,
+                                     fs_managers_[0]->uuid()));
   ASSERT_EQ(0, flush_count() - flush_count_before)
       << "Rejected votes for same term should not flush";
 
@@ -1091,8 +1091,8 @@ TEST_F(RaftConsensusQuorumTest, TestRequestVote) {
                               &response));
   ASSERT_TRUE(response.vote_granted());
   ASSERT_EQ(last_op_id.term() + 2, response.responder_term());
-  ASSERT_NO_FATAL_FAILURE(AssertDurableTermAndVote(kPeerIndex, last_op_id.term() + 2,
-                                                   fs_managers_[0]->uuid()));
+  NO_FATALS(AssertDurableTermAndVote(kPeerIndex, last_op_id.term() + 2,
+                                     fs_managers_[0]->uuid()));
   ASSERT_EQ(1, flush_count() - flush_count_before)
       << "Accepted votes with increased term should flush once";
 
@@ -1109,8 +1109,8 @@ TEST_F(RaftConsensusQuorumTest, TestRequestVote) {
   ASSERT_TRUE(response.has_consensus_error());
   ASSERT_EQ(ConsensusErrorPB::INVALID_TERM, response.consensus_error().code());
   ASSERT_EQ(last_op_id.term() + 2, response.responder_term());
-  ASSERT_NO_FATAL_FAILURE(AssertDurableTermAndVote(kPeerIndex, last_op_id.term() + 2,
-                                                   fs_managers_[0]->uuid()));
+  NO_FATALS(AssertDurableTermAndVote(kPeerIndex, last_op_id.term() + 2,
+                                     fs_managers_[0]->uuid()));
   ASSERT_EQ(0, flush_count() - flush_count_before)
       << "Rejected votes for old terms should not flush";
 
@@ -1126,8 +1126,8 @@ TEST_F(RaftConsensusQuorumTest, TestRequestVote) {
   ASSERT_TRUE(response.vote_granted());
   ASSERT_FALSE(response.has_consensus_error());
   ASSERT_EQ(last_op_id.term() + 2, response.responder_term());
-  ASSERT_NO_FATAL_FAILURE(AssertDurableTermAndVote(kPeerIndex, last_op_id.term() + 2,
-                                                   fs_managers_[0]->uuid()));
+  NO_FATALS(AssertDurableTermAndVote(kPeerIndex, last_op_id.term() + 2,
+                                     fs_managers_[0]->uuid()));
   ASSERT_EQ(0, flush_count() - flush_count_before)
       << "Pre-elections should not flush";
   request.set_is_pre_election(false);
@@ -1148,7 +1148,7 @@ TEST_F(RaftConsensusQuorumTest, TestRequestVote) {
   ASSERT_TRUE(response.has_consensus_error());
   ASSERT_EQ(ConsensusErrorPB::LAST_OPID_TOO_OLD, response.consensus_error().code());
   ASSERT_EQ(last_op_id.term() + 3, response.responder_term());
-  ASSERT_NO_FATAL_FAILURE(AssertDurableTermWithoutVote(kPeerIndex, last_op_id.term() + 3));
+  NO_FATALS(AssertDurableTermWithoutVote(kPeerIndex, last_op_id.term() + 3));
   ASSERT_EQ(1, flush_count() - flush_count_before)
       << "Rejected votes for old op index but new term should flush once.";
 
diff --git a/src/kudu/fs/block_manager-test.cc b/src/kudu/fs/block_manager-test.cc
index d83344b..c3135f3 100644
--- a/src/kudu/fs/block_manager-test.cc
+++ b/src/kudu/fs/block_manager-test.cc
@@ -649,7 +649,7 @@ TYPED_TEST(BlockManagerTest, AbortTest) {
   ASSERT_TRUE(this->bm_->OpenBlock(written_block->id(), nullptr)
               .IsNotFound());
 
-  ASSERT_NO_FATAL_FAILURE(CheckMetrics(entity, 0, 0, 0, 2, 0, test_data.size() * 2));
+  NO_FATALS(CheckMetrics(entity, 0, 0, 0, 2, 0, test_data.size() * 2));
 }
 
 TYPED_TEST(BlockManagerTest, PersistenceTest) {
@@ -718,7 +718,7 @@ TYPED_TEST(BlockManagerTest, BlockDistributionTest) {
                                      paths,
                                      true /* create */,
                                      false /* load_test_group */));
-  ASSERT_NO_FATAL_FAILURE(this->RunBlockDistributionTest(paths));
+  NO_FATALS(this->RunBlockDistributionTest(paths));
 }
 
 TYPED_TEST(BlockManagerTest, MultiPathTest) {
@@ -734,7 +734,7 @@ TYPED_TEST(BlockManagerTest, MultiPathTest) {
                                      true /* create */,
                                      false /* load_test_group */));
 
-  ASSERT_NO_FATAL_FAILURE(this->RunMultipathTest(paths));
+  NO_FATALS(this->RunMultipathTest(paths));
 }
 
 static void CloseHelper(ReadableBlock* block) {
@@ -770,7 +770,7 @@ TYPED_TEST(BlockManagerTest, MetricsTest) {
                                      shared_ptr<MemTracker>(),
                                      { this->test_dir_ },
                                      false));
-  ASSERT_NO_FATAL_FAILURE(CheckMetrics(entity, 0, 0, 0, 0, 0, 0));
+  NO_FATALS(CheckMetrics(entity, 0, 0, 0, 0, 0, 0));
 
   for (int i = 0; i < 3; i++) {
     unique_ptr<WritableBlock> writer;
@@ -778,7 +778,7 @@ TYPED_TEST(BlockManagerTest, MetricsTest) {
 
     // An open writer. Also reflected in total_writable_blocks.
     ASSERT_OK(this->bm_->CreateBlock(this->test_block_opts_, &writer));
-    ASSERT_NO_FATAL_FAILURE(CheckMetrics(
+    NO_FATALS(CheckMetrics(
         entity, 0, 1, i, i + 1,
         i * kTestData.length(), i * kTestData.length()));
 
@@ -786,13 +786,13 @@ TYPED_TEST(BlockManagerTest, MetricsTest) {
     // is now reflected in total_bytes_written.
     ASSERT_OK(writer->Append(kTestData));
     ASSERT_OK(writer->Close());
-    ASSERT_NO_FATAL_FAILURE(CheckMetrics(
+    NO_FATALS(CheckMetrics(
         entity, 0, 0, i, i + 1,
         i * kTestData.length(), (i + 1) * kTestData.length()));
 
     // An open reader.
     ASSERT_OK(this->bm_->OpenBlock(writer->id(), &reader));
-    ASSERT_NO_FATAL_FAILURE(CheckMetrics(
+    NO_FATALS(CheckMetrics(
         entity, 1, 0, i + 1, i + 1,
         i * kTestData.length(), (i + 1) * kTestData.length()));
 
@@ -800,20 +800,20 @@ TYPED_TEST(BlockManagerTest, MetricsTest) {
     gscoped_ptr<uint8_t[]> scratch(new uint8_t[kTestData.length()]);
     Slice data(scratch.get(), kTestData.length());
     ASSERT_OK(reader->Read(0, data));
-    ASSERT_NO_FATAL_FAILURE(CheckMetrics(
+    NO_FATALS(CheckMetrics(
         entity, 1, 0, i + 1, i + 1,
         (i + 1) * kTestData.length(), (i + 1) * kTestData.length()));
 
     // The reader is now gone.
     ASSERT_OK(reader->Close());
-    ASSERT_NO_FATAL_FAILURE(CheckMetrics(
+    NO_FATALS(CheckMetrics(
         entity, 0, 0, i + 1, i + 1,
         (i + 1) * kTestData.length(), (i + 1) * kTestData.length()));
   }
 }
 
 TYPED_TEST(BlockManagerTest, MemTrackerTest) {
-  ASSERT_NO_FATAL_FAILURE(this->RunMemTrackerTest());
+  NO_FATALS(this->RunMemTrackerTest());
 }
 
 TYPED_TEST(BlockManagerTest, TestDiskSpaceCheck) {
diff --git a/src/kudu/integration-tests/alter_table-test.cc b/src/kudu/integration-tests/alter_table-test.cc
index 5ce0e12..6333dfe 100644
--- a/src/kudu/integration-tests/alter_table-test.cc
+++ b/src/kudu/integration-tests/alter_table-test.cc
@@ -856,7 +856,7 @@ TEST_F(AlterTableTest, TestBootstrapAfterAlters) {
 
   // Test that restart doesn't fail when trying to replay updates or inserts
   // with the dropped column.
-  ASSERT_NO_FATAL_FAILURE(RestartTabletServer());
+  NO_FATALS(RestartTabletServer());
 
   NO_FATALS(ScanToStrings(&rows));
   ASSERT_EQ(2, rows.size());
@@ -870,7 +870,7 @@ TEST_F(AlterTableTest, TestBootstrapAfterAlters) {
   ASSERT_EQ("(int32 c0=0, int32 c2=12345, int32 c1=20000)", rows[0]);
   ASSERT_EQ("(int32 c0=16777216, int32 c2=12345, int32 c1=20000)", rows[1]);
 
-  ASSERT_NO_FATAL_FAILURE(RestartTabletServer());
+  NO_FATALS(RestartTabletServer());
   NO_FATALS(ScanToStrings(&rows));
   ASSERT_EQ(2, rows.size());
   ASSERT_EQ("(int32 c0=0, int32 c2=12345, int32 c1=20000)", rows[0]);
diff --git a/src/kudu/integration-tests/create-table-stress-test.cc b/src/kudu/integration-tests/create-table-stress-test.cc
index f0b71ee..7a147b1 100644
--- a/src/kudu/integration-tests/create-table-stress-test.cc
+++ b/src/kudu/integration-tests/create-table-stress-test.cc
@@ -176,7 +176,7 @@ TEST_F(CreateTableStressTest, CreateAndDeleteBigTable) {
     return;
   }
   string table_name = "test_table";
-  ASSERT_NO_FATAL_FAILURE(CreateBigTable(table_name, FLAGS_num_test_tablets));
+  NO_FATALS(CreateBigTable(table_name, FLAGS_num_test_tablets));
   master::GetTableLocationsResponsePB resp;
   ASSERT_OK(WaitForRunningTabletCount(cluster_->mini_master(), table_name,
                                       FLAGS_num_test_tablets, &resp));
@@ -211,7 +211,7 @@ TEST_F(CreateTableStressTest, RestartMasterDuringCreation) {
   }
 
   string table_name = "test_table";
-  ASSERT_NO_FATAL_FAILURE(CreateBigTable(table_name, FLAGS_num_test_tablets));
+  NO_FATALS(CreateBigTable(table_name, FLAGS_num_test_tablets));
 
   for (int i = 0; i < 3; i++) {
     SleepFor(MonoDelta::FromMicroseconds(500));
@@ -241,7 +241,7 @@ TEST_F(CreateTableStressTest, TestGetTableLocationsOptions) {
   string table_name = "test_table";
   LOG(INFO) << CURRENT_TEST_NAME() << ": Step 1. Creating big table " << table_name << " ...";
   LOG_TIMING(INFO, "creating big table") {
-    ASSERT_NO_FATAL_FAILURE(CreateBigTable(table_name, FLAGS_num_test_tablets));
+    NO_FATALS(CreateBigTable(table_name, FLAGS_num_test_tablets));
   }
 
   master::GetTableLocationsRequestPB req;
diff --git a/src/kudu/integration-tests/linked_list-test.cc b/src/kudu/integration-tests/linked_list-test.cc
index 52cfc5b..eddc34c 100644
--- a/src/kudu/integration-tests/linked_list-test.cc
+++ b/src/kudu/integration-tests/linked_list-test.cc
@@ -214,7 +214,7 @@ TEST_F(LinkedListTest, TestLoadAndVerify) {
                                          this, _1)));
     LOG(INFO) << "Done with tserver kill test.";
     ASSERT_OK(CheckTabletServersAreAlive(tablet_servers_.size()-1));
-    ASSERT_NO_FATAL_FAILURE(RestartCluster());
+    NO_FATALS(RestartCluster());
     // Again wait for cluster to finish bootstrapping.
     WaitForTSAndReplicas();
 
@@ -230,7 +230,7 @@ TEST_F(LinkedListTest, TestLoadAndVerify) {
   }
 
   // Kill and restart the cluster, verify data remains.
-  ASSERT_NO_FATAL_FAILURE(RestartCluster());
+  NO_FATALS(RestartCluster());
 
   LOG(INFO) << "Verifying rows after restarting entire cluster.";
 
@@ -260,13 +260,13 @@ TEST_F(LinkedListTest, TestLoadAndVerify) {
     ASSERT_OK(CheckTabletServersAreAlive(tablet_servers_.size() - 1));
   }
 
-  ASSERT_NO_FATAL_FAILURE(RestartCluster());
+  NO_FATALS(RestartCluster());
 
   // Sleep a little bit, so that the tablet is probably in bootstrapping state.
   SleepFor(MonoDelta::FromMilliseconds(100));
 
   // Restart while bootstrapping
-  ASSERT_NO_FATAL_FAILURE(RestartCluster());
+  NO_FATALS(RestartCluster());
 
   ASSERT_OK(tester_->WaitAndVerify(FLAGS_seconds_to_run, written));
   ASSERT_OK(CheckTabletServersAreAlive(tablet_servers_.size()));
diff --git a/src/kudu/integration-tests/master_failover-itest.cc b/src/kudu/integration-tests/master_failover-itest.cc
index 9dcaa97..5bc3465 100644
--- a/src/kudu/integration-tests/master_failover-itest.cc
+++ b/src/kudu/integration-tests/master_failover-itest.cc
@@ -109,7 +109,7 @@ class MasterFailoverTest : public KuduTest,
 
   virtual void SetUp() OVERRIDE {
     KuduTest::SetUp();
-    ASSERT_NO_FATAL_FAILURE(RestartCluster());
+    NO_FATALS(RestartCluster());
   }
 
   virtual void TearDown() OVERRIDE {
diff --git a/src/kudu/integration-tests/registration-test.cc b/src/kudu/integration-tests/registration-test.cc
index 4bb712d..0f6d93c 100644
--- a/src/kudu/integration-tests/registration-test.cc
+++ b/src/kudu/integration-tests/registration-test.cc
@@ -239,7 +239,7 @@ TEST_F(RegistrationTest, TestTSRegisters) {
         << "Should not include wildcards in registration";
   }
 
-  ASSERT_NO_FATAL_FAILURE(CheckTabletServersPage());
+  NO_FATALS(CheckTabletServersPage());
 
   // Restart the master, so it loses the descriptor, and ensure that the
   // heartbeater thread handles re-registering.
diff --git a/src/kudu/integration-tests/update_scan_delta_compact-test.cc b/src/kudu/integration-tests/update_scan_delta_compact-test.cc
index f3b30ae..13d4970 100644
--- a/src/kudu/integration-tests/update_scan_delta_compact-test.cc
+++ b/src/kudu/integration-tests/update_scan_delta_compact-test.cc
@@ -106,7 +106,7 @@ class UpdateScanDeltaCompactionTest : public KuduTest {
   }
 
   void CreateTable() {
-    ASSERT_NO_FATAL_FAILURE(InitCluster());
+    NO_FATALS(InitCluster());
     gscoped_ptr<KuduTableCreator> table_creator(client_->NewTableCreator());
     ASSERT_OK(table_creator->table_name(kTableName)
              .schema(&schema_)
@@ -196,9 +196,9 @@ TEST_F(UpdateScanDeltaCompactionTest, TestAll) {
     FLAGS_maintenance_manager_polling_interval_ms = 50;
   }
 
-  ASSERT_NO_FATAL_FAILURE(CreateTable());
-  ASSERT_NO_FATAL_FAILURE(InsertBaseData());
-  ASSERT_NO_FATAL_FAILURE(RunThreads());
+  NO_FATALS(CreateTable());
+  NO_FATALS(InsertBaseData());
+  NO_FATALS(RunThreads());
 }
 
 void UpdateScanDeltaCompactionTest::InsertBaseData() {
diff --git a/src/kudu/master/master-test.cc b/src/kudu/master/master-test.cc
index 045f144..25190cd 100644
--- a/src/kudu/master/master-test.cc
+++ b/src/kudu/master/master-test.cc
@@ -570,7 +570,7 @@ TEST_F(MasterTest, TestCatalog) {
   ASSERT_OK(CreateTable(kTableName, kTableSchema));
 
   ListTablesResponsePB tables;
-  ASSERT_NO_FATAL_FAILURE(DoListAllTables(&tables));
+  NO_FATALS(DoListAllTables(&tables));
   ASSERT_EQ(1, tables.tables_size());
   ASSERT_EQ(kTableName, tables.tables(0).name());
 
@@ -586,7 +586,7 @@ TEST_F(MasterTest, TestCatalog) {
   }
 
   // List tables, should show no table
-  ASSERT_NO_FATAL_FAILURE(DoListAllTables(&tables));
+  NO_FATALS(DoListAllTables(&tables));
   ASSERT_EQ(0, tables.tables_size());
 
   // Re-create the table
@@ -598,7 +598,7 @@ TEST_F(MasterTest, TestCatalog) {
   ASSERT_OK(mini_master_->master()->
       WaitUntilCatalogManagerIsLeaderAndReadyForTests(MonoDelta::FromSeconds(5)));
 
-  ASSERT_NO_FATAL_FAILURE(DoListAllTables(&tables));
+  NO_FATALS(DoListAllTables(&tables));
   ASSERT_EQ(1, tables.tables_size());
   ASSERT_EQ(kTableName, tables.tables(0).name());
 
@@ -1719,7 +1719,7 @@ TEST_F(MasterTest, TestTableIdentifierWithIdAndName) {
   ASSERT_OK(CreateTable(kTableName, kTableSchema));
 
   ListTablesResponsePB tables;
-  ASSERT_NO_FATAL_FAILURE(DoListAllTables(&tables));
+  NO_FATALS(DoListAllTables(&tables));
   ASSERT_EQ(1, tables.tables_size());
   ASSERT_EQ(kTableName, tables.tables(0).name());
   string table_id = tables.tables(0).id();
diff --git a/src/kudu/rpc/rpc-test.cc b/src/kudu/rpc/rpc-test.cc
index 88fe7fa..839198c 100644
--- a/src/kudu/rpc/rpc-test.cc
+++ b/src/kudu/rpc/rpc-test.cc
@@ -942,15 +942,15 @@ TEST_P(TestRpc, TestCallTimeout) {
   // Test a very short timeout - we expect this will time out while the
   // call is still trying to connect, or in the send queue. This was triggering ASAN failures
   // before.
-  ASSERT_NO_FATAL_FAILURE(DoTestExpectTimeout(p, MonoDelta::FromNanoseconds(1)));
+  NO_FATALS(DoTestExpectTimeout(p, MonoDelta::FromNanoseconds(1)));
 
   // Test a longer timeout - expect this will time out after we send the request,
   // but shorter than our threshold for two-stage timeout handling.
-  ASSERT_NO_FATAL_FAILURE(DoTestExpectTimeout(p, MonoDelta::FromMilliseconds(200)));
+  NO_FATALS(DoTestExpectTimeout(p, MonoDelta::FromMilliseconds(200)));
 
   // Test a longer timeout - expect this will trigger the "two-stage timeout"
   // code path.
-  ASSERT_NO_FATAL_FAILURE(DoTestExpectTimeout(p, MonoDelta::FromMilliseconds(1500)));
+  NO_FATALS(DoTestExpectTimeout(p, MonoDelta::FromMilliseconds(1500)));
 }
 
 // Inject 500ms delay in negotiation, and send a call with a short timeout, followed by
@@ -970,7 +970,7 @@ TEST_P(TestRpc, TestCallTimeoutDoesntAffectNegotiation) {
           GenericCalculatorService::static_service_name());
 
   FLAGS_rpc_negotiation_inject_delay_ms = 500;
-  ASSERT_NO_FATAL_FAILURE(DoTestExpectTimeout(p, MonoDelta::FromMilliseconds(50)));
+  NO_FATALS(DoTestExpectTimeout(p, MonoDelta::FromMilliseconds(50)));
   ASSERT_OK(DoTestSyncCall(p, GenericCalculatorService::kAddMethodName));
 
   // Only the second call should have been received by the server, because we
@@ -1015,7 +1015,7 @@ TEST_F(TestRpc, TestNegotiationTimeout) {
           GenericCalculatorService::static_service_name());
 
   bool is_negotiation_error = false;
-  ASSERT_NO_FATAL_FAILURE(DoTestExpectTimeout(
+  NO_FATALS(DoTestExpectTimeout(
       p, MonoDelta::FromMilliseconds(100), &is_negotiation_error));
   EXPECT_TRUE(is_negotiation_error);
 
diff --git a/src/kudu/tablet/compaction-test.cc b/src/kudu/tablet/compaction-test.cc
index fd0006d..c45a61f 100644
--- a/src/kudu/tablet/compaction-test.cc
+++ b/src/kudu/tablet/compaction-test.cc
@@ -389,7 +389,7 @@ class TestCompaction : public KuduRowSetTest {
       // Flush it to disk and re-open it.
       shared_ptr<DiskRowSet> rs;
       FlushMRSAndReopenNoRoll(*mrs, schema, &rs);
-      ASSERT_NO_FATAL_FAILURE();
+      NO_FATALS();
       rowsets.push_back(rs);
 
       // Perform some updates into DMS
@@ -399,7 +399,7 @@ class TestCompaction : public KuduRowSetTest {
 
     // Merge them.
     shared_ptr<DiskRowSet> result_rs;
-    ASSERT_NO_FATAL_FAILURE(CompactAndReopenNoRoll(rowsets, projection, &result_rs));
+    NO_FATALS(CompactAndReopenNoRoll(rowsets, projection, &result_rs));
 
     // Verify the resulting compaction output has the right number
     // of rows.
@@ -438,7 +438,7 @@ class TestCompaction : public KuduRowSetTest {
         }
         shared_ptr<DiskRowSet> rs;
         FlushMRSAndReopenNoRoll(*mrs, schema_, &rs);
-        ASSERT_NO_FATAL_FAILURE();
+        NO_FATALS();
         rowsets.push_back(rs);
       }
     } else {
@@ -557,7 +557,7 @@ TEST_F(TestCompaction, TestRowSetInput) {
                                 mem_trackers_.tablet_tracker, &mrs));
     InsertRows(mrs.get(), 10, 0);
     FlushMRSAndReopenNoRoll(*mrs, schema_, &rs);
-    ASSERT_NO_FATAL_FAILURE();
+    NO_FATALS();
   }
 
   // Update the rows in the rowset.
@@ -597,7 +597,7 @@ TEST_F(TestCompaction, TestDuplicatedGhostRowsMerging) {
                                 mem_trackers_.tablet_tracker, &mrs));
     InsertRows(mrs.get(), 10, 0);
     FlushMRSAndReopenNoRoll(*mrs, schema_, &rs1);
-    ASSERT_NO_FATAL_FAILURE();
+    NO_FATALS();
   }
   // Now delete the rows, this will make the rs report them as deleted and
   // so we would reinsert them into the MRS.
@@ -611,7 +611,7 @@ TEST_F(TestCompaction, TestDuplicatedGhostRowsMerging) {
     InsertRows(mrs.get(), 10, 0);
     UpdateRows(mrs.get(), 10, 0, 1);
     FlushMRSAndReopenNoRoll(*mrs, schema_, &rs2);
-    ASSERT_NO_FATAL_FAILURE();
+    NO_FATALS();
   }
   DeleteRows(rs2.get(), 10);
 
@@ -623,7 +623,7 @@ TEST_F(TestCompaction, TestDuplicatedGhostRowsMerging) {
     InsertRows(mrs.get(), 10, 0);
     UpdateRows(mrs.get(), 10, 0, 2);
     FlushMRSAndReopenNoRoll(*mrs, schema_, &rs3);
-    ASSERT_NO_FATAL_FAILURE();
+    NO_FATALS();
   }
 
   shared_ptr<DiskRowSet> result;
@@ -847,7 +847,7 @@ TEST_F(TestCompaction, TestMRSCompactionDoesntOutputUnobservableRows) {
                                 mem_trackers_.tablet_tracker, &mrs));
     InsertRow(mrs.get(), 1, 1);
     FlushMRSAndReopenNoRoll(*mrs, schema_, &rs1);
-    ASSERT_NO_FATAL_FAILURE();
+    NO_FATALS();
   }
 
   // Now make the row a ghost in rs1 in the same transaction as we reinsert it in the mrs then
@@ -868,7 +868,7 @@ TEST_F(TestCompaction, TestMRSCompactionDoesntOutputUnobservableRows) {
     InsertRowInTransaction(mrs.get(), tx, 2, 0);
     tx.Commit();
     FlushMRSAndReopenNoRoll(*mrs, schema_, &rs2);
-    ASSERT_NO_FATAL_FAILURE();
+    NO_FATALS();
   }
 
   MvccSnapshot all_snap = MvccSnapshot::CreateSnapshotIncludingAllTransactions();
@@ -912,7 +912,7 @@ TEST_F(TestCompaction, TestOneToOne) {
   // Flush it to disk and re-open.
   shared_ptr<DiskRowSet> rs;
   FlushMRSAndReopenNoRoll(*mrs, schema_, &rs);
-  ASSERT_NO_FATAL_FAILURE();
+  NO_FATALS();
 
   // Update the rows with some updates that weren't in the snapshot.
   UpdateRows(mrs.get(), 1000, 0, 2);
@@ -957,7 +957,7 @@ TEST_F(TestCompaction, TestKUDU102) {
   InsertRows(mrs.get(), 10, 0);
   shared_ptr<DiskRowSet> rs;
   FlushMRSAndReopenNoRoll(*mrs, schema_, &rs);
-  ASSERT_NO_FATAL_FAILURE();
+  NO_FATALS();
 
   shared_ptr<MemRowSet> mrs_b;
   ASSERT_OK(MemRowSet::Create(1, schema_, log_anchor_registry_.get(),
@@ -966,7 +966,7 @@ TEST_F(TestCompaction, TestKUDU102) {
   MvccSnapshot snap(mvcc_);
   shared_ptr<DiskRowSet> rs_b;
   FlushMRSAndReopenNoRoll(*mrs_b, schema_, &rs_b);
-  ASSERT_NO_FATAL_FAILURE();
+  NO_FATALS();
 
   // Update all the rows in the second row set
   UpdateRows(mrs_b.get(), 10, 100, 2);
@@ -1058,7 +1058,7 @@ TEST_F(TestCompaction, BenchmarkMergeWithoutOverlap) {
     LOG(INFO) << "Skipped: must enable slow tests.";
     return;
   }
-  ASSERT_NO_FATAL_FAILURE(DoBenchmark<false>());
+  NO_FATALS(DoBenchmark<false>());
 }
 
 // Benchmark for the compaction merge input when the inputs are entirely
@@ -1068,7 +1068,7 @@ TEST_F(TestCompaction, BenchmarkMergeWithOverlap) {
     LOG(INFO) << "Skipped: must enable slow tests.";
     return;
   }
-  ASSERT_NO_FATAL_FAILURE(DoBenchmark<true>());
+  NO_FATALS(DoBenchmark<true>());
 }
 #endif
 
diff --git a/src/kudu/tablet/composite-pushdown-test.cc b/src/kudu/tablet/composite-pushdown-test.cc
index 4698d8a..7a7c32f 100644
--- a/src/kudu/tablet/composite-pushdown-test.cc
+++ b/src/kudu/tablet/composite-pushdown-test.cc
@@ -145,7 +145,7 @@ TEST_F(CompositePushdownTest, TestPushDownExactEquality) {
   spec.AddPredicate(pred_host);
   vector<string> results;
 
-  ASSERT_NO_FATAL_FAILURE(ScanTablet(&spec, &results, "Exact match using compound key"));
+  NO_FATALS(ScanTablet(&spec, &results, "Exact match using compound key"));
   ASSERT_EQ(1, results.size());
   EXPECT_EQ("(int16 year=2001, int8 month=9, int8 day=7, "
             R"(string hostname="foo", string data="2001/09/07-foo"))",
@@ -172,7 +172,7 @@ TEST_F(CompositePushdownTest, TestPushDownStringInequality) {
   spec.AddPredicate(*pred_host);
   vector<string> results;
 
-  ASSERT_NO_FATAL_FAILURE(ScanTablet(&spec, &results, "Exact match using compound key"));
+  NO_FATALS(ScanTablet(&spec, &results, "Exact match using compound key"));
   ASSERT_EQ(2, results.size());
   EXPECT_EQ("(int16 year=2001, int8 month=9, int8 day=7, "
             R"(string hostname="baz", string data="2001/09/07-baz"))",
@@ -196,7 +196,7 @@ TEST_F(CompositePushdownTest, TestPushDownDateEquality) {
   spec.AddPredicate(pred_day);
   vector<string> results;
 
-  ASSERT_NO_FATAL_FAILURE(ScanTablet(&spec, &results, "Exact match using compound key"));
+  NO_FATALS(ScanTablet(&spec, &results, "Exact match using compound key"));
   ASSERT_EQ(3, results.size());
   EXPECT_EQ("(int16 year=2001, int8 month=9, int8 day=7, "
             R"(string hostname="baz", string data="2001/09/07-baz"))",
@@ -220,8 +220,8 @@ TEST_F(CompositePushdownTest, TestPushDownPrefixEquality) {
     spec.AddPredicate(pred_year);
     spec.AddPredicate(pred_month);
     vector<string> results;
-    ASSERT_NO_FATAL_FAILURE(ScanTablet(&spec, &results,
-                                       "Prefix match using 2/3 of a compound key"));
+    NO_FATALS(ScanTablet(&spec, &results,
+                         "Prefix match using 2/3 of a compound key"));
     ASSERT_EQ(28 * 3, results.size());
     EXPECT_EQ("(int16 year=2001, int8 month=9, int8 day=1, "
               R"(string hostname="baz", string data="2001/09/01-baz"))",
@@ -235,8 +235,8 @@ TEST_F(CompositePushdownTest, TestPushDownPrefixEquality) {
     ScanSpec spec;
     spec.AddPredicate(pred_year);
     vector<string> results;
-    ASSERT_NO_FATAL_FAILURE(ScanTablet(&spec, &results,
-                                       "Prefix match using 1/3 of a compound key"));
+    NO_FATALS(ScanTablet(&spec, &results,
+                         "Prefix match using 1/3 of a compound key"));
     ASSERT_EQ(28 * 12 * 3, results.size());
     EXPECT_EQ("(int16 year=2001, int8 month=1, int8 day=1, "
               R"(string hostname="baz", string data="2001/01/01-baz"))",
@@ -274,7 +274,7 @@ TEST_F(CompositePushdownTest, TestPushDownPrefixEqualitySuffixInequality) {
     spec.AddPredicate(pred_month_eq);
     spec.AddPredicate(pred_day_ge_lt);
     vector<string> results;
-    ASSERT_NO_FATAL_FAILURE(ScanTablet(&spec, &results, "Prefix equality, suffix inequality"));
+    NO_FATALS(ScanTablet(&spec, &results, "Prefix equality, suffix inequality"));
     ASSERT_EQ(15 * 3, results.size());
     EXPECT_EQ("(int16 year=2001, int8 month=9, int8 day=1, "
               R"(string hostname="baz", string data="2001/09/01-baz"))",
@@ -291,7 +291,7 @@ TEST_F(CompositePushdownTest, TestPushDownPrefixEqualitySuffixInequality) {
     spec.AddPredicate(pred_month_eq);
     spec.AddPredicate(pred_day_ge);
     vector<string> results;
-    ASSERT_NO_FATAL_FAILURE(ScanTablet(&spec, &results, "Prefix equality, suffix inequality"));
+    NO_FATALS(ScanTablet(&spec, &results, "Prefix equality, suffix inequality"));
     ASSERT_EQ(28 * 3, results.size());
     EXPECT_EQ("(int16 year=2001, int8 month=9, int8 day=1, "
               R"(string hostname="baz", string data="2001/09/01-baz"))",
@@ -308,7 +308,7 @@ TEST_F(CompositePushdownTest, TestPushDownPrefixEqualitySuffixInequality) {
     spec.AddPredicate(pred_month_eq);
     spec.AddPredicate(pred_day_lt);
     vector<string> results;
-    ASSERT_NO_FATAL_FAILURE(ScanTablet(&spec, &results, "Prefix equality, suffix inequality"));
+    NO_FATALS(ScanTablet(&spec, &results, "Prefix equality, suffix inequality"));
     ASSERT_EQ(15 * 3, results.size());
     EXPECT_EQ("(int16 year=2001, int8 month=9, int8 day=1, "
               R"(string hostname="baz", string data="2001/09/01-baz"))",
@@ -324,7 +324,7 @@ TEST_F(CompositePushdownTest, TestPushDownPrefixEqualitySuffixInequality) {
     spec.AddPredicate(pred_year);
     spec.AddPredicate(pred_month_ge_lt);
     vector<string> results;
-    ASSERT_NO_FATAL_FAILURE(ScanTablet(&spec, &results, "Prefix equality, suffix inequality"));
+    NO_FATALS(ScanTablet(&spec, &results, "Prefix equality, suffix inequality"));
     ASSERT_EQ(3 * 28 * 3, results.size());
     EXPECT_EQ("(int16 year=2001, int8 month=9, int8 day=1, "
               R"(string hostname="baz", string data="2001/09/01-baz"))",
@@ -340,7 +340,7 @@ TEST_F(CompositePushdownTest, TestPushDownPrefixEqualitySuffixInequality) {
     spec.AddPredicate(pred_year);
     spec.AddPredicate(pred_month_lt);
     vector<string> results;
-    ASSERT_NO_FATAL_FAILURE(ScanTablet(&spec, &results, "Prefix equality, suffix inequality"));
+    NO_FATALS(ScanTablet(&spec, &results, "Prefix equality, suffix inequality"));
     ASSERT_EQ(8 * 28 * 3, results.size());
     EXPECT_EQ("(int16 year=2001, int8 month=1, int8 day=1, "
               R"(string hostname="baz", string data="2001/01/01-baz"))",
@@ -361,7 +361,7 @@ TEST_F(CompositePushdownTest, TestPushdownPrefixInequality) {
     ScanSpec spec;
     spec.AddPredicate(pred_year);
     vector<string> results;
-    ASSERT_NO_FATAL_FAILURE(ScanTablet(&spec, &results, "Prefix inequality"));
+    NO_FATALS(ScanTablet(&spec, &results, "Prefix inequality"));
     ASSERT_EQ(3 * 12 * 28 * 3, results.size());
     EXPECT_EQ("(int16 year=2001, int8 month=1, int8 day=1, "
               R"(string hostname="baz", string data="2001/01/01-baz"))",
@@ -377,7 +377,7 @@ TEST_F(CompositePushdownTest, TestPushdownPrefixInequality) {
     ScanSpec spec;
     spec.AddPredicate(pred_year);
     vector<string> results;
-    ASSERT_NO_FATAL_FAILURE(ScanTablet(&spec, &results, "Prefix inequality"));
+    NO_FATALS(ScanTablet(&spec, &results, "Prefix inequality"));
     ASSERT_EQ(10 * 12 * 28 * 3, results.size());
     // Needed because results from memrowset are returned first and memrowset begins
     // with last 10% of the keys (e.g., last few years)
@@ -395,7 +395,7 @@ TEST_F(CompositePushdownTest, TestPushdownPrefixInequality) {
     ScanSpec spec;
     spec.AddPredicate(pred_year);
     vector<string> results;
-    ASSERT_NO_FATAL_FAILURE(ScanTablet(&spec, &results, "Prefix inequality"));
+    NO_FATALS(ScanTablet(&spec, &results, "Prefix inequality"));
     ASSERT_EQ(4 * 12 * 28 * 3, results.size());
     EXPECT_EQ("(int16 year=2000, int8 month=1, int8 day=1, "
               R"(string hostname="baz", string data="2000/01/01-baz"))",
diff --git a/src/kudu/tablet/major_delta_compaction-test.cc b/src/kudu/tablet/major_delta_compaction-test.cc
index 24af96f..48857bf 100644
--- a/src/kudu/tablet/major_delta_compaction-test.cc
+++ b/src/kudu/tablet/major_delta_compaction-test.cc
@@ -209,7 +209,7 @@ TEST_F(TestMajorDeltaCompaction, TestKudu2656) {
 // unchanged columns intact.
 TEST_F(TestMajorDeltaCompaction, TestCompact) {
   const int kNumRows = 100;
-  ASSERT_NO_FATAL_FAILURE(WriteTestTablet(kNumRows));
+  NO_FATALS(WriteTestTablet(kNumRows));
   ASSERT_OK(tablet()->Flush());
 
   vector<shared_ptr<RowSet> > all_rowsets;
@@ -229,17 +229,17 @@ TEST_F(TestMajorDeltaCompaction, TestCompact) {
   for (int i = 0; i < 3; i++) {
     SCOPED_TRACE(Substitute("Update/compact round $0", i));
     // Update the even rows and verify.
-    ASSERT_NO_FATAL_FAILURE(UpdateRows(kNumRows, false));
-    ASSERT_NO_FATAL_FAILURE(VerifyData());
+    NO_FATALS(UpdateRows(kNumRows, false));
+    NO_FATALS(VerifyData());
 
     // Flush the deltas, make sure data stays the same.
     ASSERT_OK(tablet()->FlushBiggestDMS());
-    ASSERT_NO_FATAL_FAILURE(VerifyData());
+    NO_FATALS(VerifyData());
 
     // Update the odd rows and flush deltas
-    ASSERT_NO_FATAL_FAILURE(UpdateRows(kNumRows, true));
+    NO_FATALS(UpdateRows(kNumRows, true));
     ASSERT_OK(tablet()->FlushBiggestDMS());
-    ASSERT_NO_FATAL_FAILURE(VerifyData());
+    NO_FATALS(VerifyData());
 
     // Major compact some columns.
     vector<ColumnId> col_ids;
@@ -248,14 +248,14 @@ TEST_F(TestMajorDeltaCompaction, TestCompact) {
     }
     ASSERT_OK(tablet()->DoMajorDeltaCompaction(col_ids, rs));
 
-    ASSERT_NO_FATAL_FAILURE(VerifyData());
+    NO_FATALS(VerifyData());
   }
 }
 
 // Verify that we do issue UNDO files and that we can read them.
 TEST_F(TestMajorDeltaCompaction, TestUndos) {
   const int kNumRows = 100;
-  ASSERT_NO_FATAL_FAILURE(WriteTestTablet(kNumRows));
+  NO_FATALS(WriteTestTablet(kNumRows));
   ASSERT_OK(tablet()->Flush());
 
   vector<shared_ptr<RowSet> > all_rowsets;
@@ -266,26 +266,26 @@ TEST_F(TestMajorDeltaCompaction, TestUndos) {
   MvccSnapshot snap(*tablet()->mvcc_manager());
 
   // Verify the old data and grab a copy of the old state.
-  ASSERT_NO_FATAL_FAILURE(VerifyDataWithMvccAndExpectedState(snap, expected_state_));
+  NO_FATALS(VerifyDataWithMvccAndExpectedState(snap, expected_state_));
   vector<ExpectedRow> old_state(expected_state_.size());
   std::copy(expected_state_.begin(), expected_state_.end(), old_state.begin());
 
   // Flush the DMS, make sure we still see the old data.
-  ASSERT_NO_FATAL_FAILURE(UpdateRows(kNumRows, false));
+  NO_FATALS(UpdateRows(kNumRows, false));
   ASSERT_OK(tablet()->FlushBiggestDMS());
-  ASSERT_NO_FATAL_FAILURE(VerifyDataWithMvccAndExpectedState(snap, old_state));
+  NO_FATALS(VerifyDataWithMvccAndExpectedState(snap, old_state));
 
   // Major compact, check we still have the old data.
   vector<ColumnId> col_ids_to_compact = { schema_.column_id(1),
                                           schema_.column_id(3),
                                           schema_.column_id(4) };
   ASSERT_OK(tablet()->DoMajorDeltaCompaction(col_ids_to_compact, rs));
-  ASSERT_NO_FATAL_FAILURE(VerifyDataWithMvccAndExpectedState(snap, old_state));
+  NO_FATALS(VerifyDataWithMvccAndExpectedState(snap, old_state));
 
   // Test adding three updates per row to three REDO files.
   for (int i = 0; i < 3; i++) {
     for (int j = 0; j < 3; j++) {
-      ASSERT_NO_FATAL_FAILURE(UpdateRows(kNumRows, false));
+      NO_FATALS(UpdateRows(kNumRows, false));
     }
     ASSERT_OK(tablet()->FlushBiggestDMS());
   }
@@ -294,37 +294,37 @@ TEST_F(TestMajorDeltaCompaction, TestUndos) {
   // and the new data.
   col_ids_to_compact.pop_back();
   ASSERT_OK(tablet()->DoMajorDeltaCompaction(col_ids_to_compact, rs));
-  ASSERT_NO_FATAL_FAILURE(VerifyDataWithMvccAndExpectedState(snap, old_state));
-  ASSERT_NO_FATAL_FAILURE(VerifyData());
+  NO_FATALS(VerifyDataWithMvccAndExpectedState(snap, old_state));
+  NO_FATALS(VerifyData());
 }
 
 // Test that the delete REDO mutations are written back and not filtered out.
 TEST_F(TestMajorDeltaCompaction, TestCarryDeletesOver) {
   const int kNumRows = 100;
 
-  ASSERT_NO_FATAL_FAILURE(WriteTestTablet(kNumRows));
+  NO_FATALS(WriteTestTablet(kNumRows));
   ASSERT_OK(tablet()->Flush());
 
   vector<shared_ptr<RowSet> > all_rowsets;
   tablet()->GetRowSetsForTests(&all_rowsets);
   shared_ptr<RowSet> rs = all_rowsets.front();
 
-  ASSERT_NO_FATAL_FAILURE(UpdateRows(kNumRows, false));
+  NO_FATALS(UpdateRows(kNumRows, false));
   ASSERT_OK(tablet()->FlushBiggestDMS());
 
   MvccSnapshot updates_snap(*tablet()->mvcc_manager());
   vector<ExpectedRow> old_state(expected_state_.size());
   std::copy(expected_state_.begin(), expected_state_.end(), old_state.begin());
 
-  ASSERT_NO_FATAL_FAILURE(DeleteRows(kNumRows));
+  NO_FATALS(DeleteRows(kNumRows));
   ASSERT_OK(tablet()->FlushBiggestDMS());
 
   vector<ColumnId> col_ids_to_compact = { schema_.column_id(4) };
   ASSERT_OK(tablet()->DoMajorDeltaCompaction(col_ids_to_compact, rs));
 
-  ASSERT_NO_FATAL_FAILURE(VerifyData());
+  NO_FATALS(VerifyData());
 
-  ASSERT_NO_FATAL_FAILURE(VerifyDataWithMvccAndExpectedState(updates_snap, old_state));
+  NO_FATALS(VerifyDataWithMvccAndExpectedState(updates_snap, old_state));
 }
 
 // Verify that reinserts only happen in the MRS and not down into the DRS. This test serves as a
@@ -334,13 +334,13 @@ TEST_F(TestMajorDeltaCompaction, TestReinserts) {
   const int kNumRows = 100;
 
   // Reinsert all the rows directly in the MRS.
-  ASSERT_NO_FATAL_FAILURE(WriteTestTablet(kNumRows)); // 1st batch.
-  ASSERT_NO_FATAL_FAILURE(DeleteRows(kNumRows)); // Delete 1st batch.
-  ASSERT_NO_FATAL_FAILURE(WriteTestTablet(kNumRows)); // 2nd batch.
+  NO_FATALS(WriteTestTablet(kNumRows)); // 1st batch.
+  NO_FATALS(DeleteRows(kNumRows)); // Delete 1st batch.
+  NO_FATALS(WriteTestTablet(kNumRows)); // 2nd batch.
   ASSERT_OK(tablet()->Flush());
 
   // Update those rows, we'll try to read them at the end.
-  ASSERT_NO_FATAL_FAILURE(UpdateRows(kNumRows, false)); // Update 2nd batch.
+  NO_FATALS(UpdateRows(kNumRows, false)); // Update 2nd batch.
   vector<ExpectedRow> old_state(expected_state_.size());
   std::copy(expected_state_.begin(), expected_state_.end(), old_state.begin());
   MvccSnapshot second_batch_inserts(*tablet()->mvcc_manager());
@@ -349,12 +349,12 @@ TEST_F(TestMajorDeltaCompaction, TestReinserts) {
   tablet()->GetRowSetsForTests(&all_rowsets);
   ASSERT_EQ(1, all_rowsets.size());
 
-  ASSERT_NO_FATAL_FAILURE(VerifyData());
+  NO_FATALS(VerifyData());
 
   // Delete the rows (will go into the DMS) then reinsert them (will go in a new MRS), then flush
   // the DMS with the deletes so that we can major compact them.
-  ASSERT_NO_FATAL_FAILURE(DeleteRows(kNumRows)); // Delete 2nd batch.
-  ASSERT_NO_FATAL_FAILURE(WriteTestTablet(kNumRows)); // 3rd batch.
+  NO_FATALS(DeleteRows(kNumRows)); // Delete 2nd batch.
+  NO_FATALS(WriteTestTablet(kNumRows)); // 3rd batch.
   ASSERT_OK(tablet()->FlushBiggestDMS());
 
   // At this point, here's the layout (the 1st batch was discarded during the first flush):
@@ -369,7 +369,7 @@ TEST_F(TestMajorDeltaCompaction, TestReinserts) {
   ASSERT_OK(tablet()->DoMajorDeltaCompaction(col_ids_to_compact, rs));
 
   // The data we'll see here is the 3rd batch of inserts, doesn't have updates.
-  ASSERT_NO_FATAL_FAILURE(VerifyData());
+  NO_FATALS(VerifyData());
 
   // Test that the 3rd batch of inserts goes into a new RS, even though it's the same row keys.
   ASSERT_OK(tablet()->Flush());
@@ -378,19 +378,19 @@ TEST_F(TestMajorDeltaCompaction, TestReinserts) {
   ASSERT_EQ(2, all_rowsets.size());
 
   // Verify the 3rd batch.
-  ASSERT_NO_FATAL_FAILURE(VerifyData());
+  NO_FATALS(VerifyData());
 
   // Verify the updates in the second batch are still readable, from the first RS.
-  ASSERT_NO_FATAL_FAILURE(VerifyDataWithMvccAndExpectedState(second_batch_inserts, old_state));
+  NO_FATALS(VerifyDataWithMvccAndExpectedState(second_batch_inserts, old_state));
 }
 
 // Verify that we won't schedule a major compaction when files are just composed of deletes.
 TEST_F(TestMajorDeltaCompaction, TestJustDeletes) {
   const int kNumRows = 100;
 
-  ASSERT_NO_FATAL_FAILURE(WriteTestTablet(kNumRows));
+  NO_FATALS(WriteTestTablet(kNumRows));
   ASSERT_OK(tablet()->Flush());
-  ASSERT_NO_FATAL_FAILURE(DeleteRows(kNumRows));
+  NO_FATALS(DeleteRows(kNumRows));
   ASSERT_OK(tablet()->FlushBiggestDMS());
 
   shared_ptr<RowSet> rs;
diff --git a/src/kudu/tablet/mt-rowset_delta_compaction-test.cc b/src/kudu/tablet/mt-rowset_delta_compaction-test.cc
index 6af4d12..08a54df 100644
--- a/src/kudu/tablet/mt-rowset_delta_compaction-test.cc
+++ b/src/kudu/tablet/mt-rowset_delta_compaction-test.cc
@@ -179,9 +179,9 @@ class TestMultiThreadedRowSetDeltaCompaction : public TestRowSet {
     StartThreads(rs.get());
     SleepFor(MonoDelta::FromSeconds(FLAGS_num_seconds_per_thread));
     base::subtle::NoBarrier_Store(&should_run_, 0);
-    ASSERT_NO_FATAL_FAILURE(JoinThreads());
+    NO_FATALS(JoinThreads());
 
-    ASSERT_NO_FATAL_FAILURE(ReadVerify(rs.get()));
+    NO_FATALS(ReadVerify(rs.get()));
   }
 
   bool ShouldRun() const {
diff --git a/src/kudu/tablet/tablet_random_access-test.cc b/src/kudu/tablet/tablet_random_access-test.cc
index 0c87894..12455e8 100644
--- a/src/kudu/tablet/tablet_random_access-test.cc
+++ b/src/kudu/tablet/tablet_random_access-test.cc
@@ -51,6 +51,7 @@
 #include "kudu/util/monotime.h"
 #include "kudu/util/status.h"
 #include "kudu/util/stopwatch.h"
+#include "kudu/util/test_macros.h"
 #include "kudu/util/test_util.h"
 #include "kudu/util/thread.h"
 
@@ -157,7 +158,7 @@ class TestRandomAccess : public KuduTabletTest {
     s.start();
     while (s.elapsed().wall_seconds() < FLAGS_runtime_seconds) {
       for (int i = 0; i < 100; i++) {
-        ASSERT_NO_FATAL_FAILURE(DoRandomBatch());
+        NO_FATALS(DoRandomBatch());
         op_count++;
       }
     }
diff --git a/src/kudu/tserver/tablet_server-test.cc b/src/kudu/tserver/tablet_server-test.cc
index 9ef93ac..a7d5b4e 100644
--- a/src/kudu/tserver/tablet_server-test.cc
+++ b/src/kudu/tserver/tablet_server-test.cc
@@ -819,7 +819,7 @@ TEST_F(TabletServerTest, TestInsert) {
   Timestamp now_before = mini_server_->server()->clock()->Now();
 
   rows_inserted = nullptr;
-  ASSERT_NO_FATAL_FAILURE(ShutdownAndRebuildTablet());
+  NO_FATALS(ShutdownAndRebuildTablet());
   VerifyRows(schema_, { KeyValue(1, 1), KeyValue(2, 1), KeyValue(1234, 5678) });
 
   // get the clock's timestamp after replay
@@ -1108,7 +1108,7 @@ TEST_F(TabletServerTest, TestInsertAndMutate) {
 
   rows_inserted = nullptr;
   rows_updated = nullptr;
-  ASSERT_NO_FATAL_FAILURE(ShutdownAndRebuildTablet());
+  NO_FATALS(ShutdownAndRebuildTablet());
   VerifyRows(schema_, { KeyValue(2, 3), KeyValue(3, 4), KeyValue(4, 4), KeyValue(6, 6) });
 
   // get the clock's timestamp after replay
@@ -1294,7 +1294,7 @@ TEST_F(TabletServerTest, TestRecoveryWithMutationsWhileFlushing) {
   // Shutdown the tserver and try and rebuild the tablet from the log
   // produced on recovery (recovery flushed no state, but produced a new
   // log).
-  ASSERT_NO_FATAL_FAILURE(ShutdownAndRebuildTablet());
+  NO_FATALS(ShutdownAndRebuildTablet());
   VerifyRows(schema_, { KeyValue(1, 10),
                         KeyValue(2, 20),
                         KeyValue(3, 30),
@@ -1307,7 +1307,7 @@ TEST_F(TabletServerTest, TestRecoveryWithMutationsWhileFlushing) {
 
   // Shutdown and rebuild again to test that the log generated during
   // the previous recovery allows to perform recovery again.
-  ASSERT_NO_FATAL_FAILURE(ShutdownAndRebuildTablet());
+  NO_FATALS(ShutdownAndRebuildTablet());
   VerifyRows(schema_, { KeyValue(1, 10),
                         KeyValue(2, 20),
                         KeyValue(3, 30),
@@ -1332,7 +1332,7 @@ TEST_F(TabletServerTest, TestRecoveryWithMutationsWhileFlushingAndCompacting) {
   // flush the first time
   ASSERT_OK(tablet_replica_->tablet()->Flush());
 
-  ASSERT_NO_FATAL_FAILURE(ShutdownAndRebuildTablet());
+  NO_FATALS(ShutdownAndRebuildTablet());
   VerifyRows(schema_, { KeyValue(1, 10),
                         KeyValue(2, 20),
                         KeyValue(3, 30),
@@ -1372,7 +1372,7 @@ TEST_F(TabletServerTest, TestRecoveryWithMutationsWhileFlushingAndCompacting) {
   // Shutdown the tserver and try and rebuild the tablet from the log
   // produced on recovery (recovery flushed no state, but produced a new
   // log).
-  ASSERT_NO_FATAL_FAILURE(ShutdownAndRebuildTablet());
+  NO_FATALS(ShutdownAndRebuildTablet());
   VerifyRows(schema_, { KeyValue(1, 11),
                         KeyValue(2, 22),
                         KeyValue(3, 32),
@@ -1389,14 +1389,14 @@ TEST_F(TabletServerTest, TestRecoveryWithMutationsWhileFlushingAndCompacting) {
   ASSERT_GE(now_after.value(), now_before.value());
 }
 
-#define ANFF ASSERT_NO_FATAL_FAILURE
+#define ANFF NO_FATALS
 
 // Regression test for KUDU-176. Ensures that after a major delta compaction,
 // restarting properly recovers the tablet.
 TEST_F(TabletServerTest, TestKUDU_176_RecoveryAfterMajorDeltaCompaction) {
 
   // Flush a DRS with 1 row.
-  ASSERT_NO_FATAL_FAILURE(InsertTestRowsRemote(1, 1));
+  NO_FATALS(InsertTestRowsRemote(1, 1));
   ASSERT_OK(tablet_replica_->tablet()->Flush());
   ANFF(VerifyRows(schema_, { KeyValue(1, 1) }));
 
@@ -1600,7 +1600,7 @@ TEST_F(TabletServerTest, TestReadLatest) {
       METRIC_tablet_active_scanners.Instantiate(tablet->tablet()->GetMetricEntity(), 0);
 
   ScanResponsePB resp;
-  ASSERT_NO_FATAL_FAILURE(OpenScannerWithAllColumns(&resp));
+  NO_FATALS(OpenScannerWithAllColumns(&resp));
 
   // Ensure that the scanner ID came back and got inserted into the
   // ScannerManager map.
@@ -1619,7 +1619,7 @@ TEST_F(TabletServerTest, TestReadLatest) {
 
   // Drain all the rows from the scanner.
   vector<string> results;
-  ASSERT_NO_FATAL_FAILURE(DrainScannerToStrings(resp.scanner_id(), schema_, &results));
+  NO_FATALS(DrainScannerToStrings(resp.scanner_id(), schema_, &results));
   ASSERT_EQ(num_rows, results.size());
 
   KuduPartialRow row(&schema_);
@@ -1673,7 +1673,7 @@ TEST_P(ExpiredScannerParamTest, Test) {
 
   // Open a scanner but don't read from it.
   ScanResponsePB resp;
-  ASSERT_NO_FATAL_FAILURE(OpenScannerWithAllColumns(&resp, mode));
+  NO_FATALS(OpenScannerWithAllColumns(&resp, mode));
 
   // The scanner should expire after a short time.
   ASSERT_EVENTUALLY([&]() {
@@ -1790,7 +1790,7 @@ TEST_P(ScannerOpenWhenServerShutsDownParamTest, Test) {
   ASSERT_OK(tablet_replica_->tablet()->Flush());
 
   ScanResponsePB resp;
-  ASSERT_NO_FATAL_FAILURE(OpenScannerWithAllColumns(&resp, mode));
+  NO_FATALS(OpenScannerWithAllColumns(&resp, mode));
 
   // Scanner is now open. The test will now shut down the TS with the scanner still
   // out there. Due to KUDU-161 this used to fail, since the scanner (and thus the MRS)
@@ -1854,7 +1854,7 @@ TEST_F(TabletServerTest, TestSnapshotScan) {
     ASSERT_TRUE(resp.has_more_results());
     // Drain all the rows from the scanner.
     vector<string> results;
-    ASSERT_NO_FATAL_FAILURE(DrainScannerToStrings(resp.scanner_id(), schema_, &results));
+    NO_FATALS(DrainScannerToStrings(resp.scanner_id(), schema_, &results));
     // on each scan we should get (num_rows / num_batches) * batch_idx rows back
     int expected_num_rows = (num_rows / num_batches) * batch_idx;
     ASSERT_EQ(expected_num_rows, results.size());
@@ -2089,7 +2089,7 @@ TEST_F(TabletServerTest, TestSnapshotScan_SnapshotInTheFutureWithPropagatedTimes
             HybridClock::GetLogicalValue(propagated_timestamp));
 
   vector<string> results;
-  ASSERT_NO_FATAL_FAILURE(DrainScannerToStrings(resp.scanner_id(), schema_, &results));
+  NO_FATALS(DrainScannerToStrings(resp.scanner_id(), schema_, &results));
   ASSERT_EQ(1, results.size());
   ASSERT_EQ(R"((int32 key=0, int32 int_val=0, string string_val="original0"))", results[0]);
 }
@@ -2157,7 +2157,7 @@ TEST_F(TabletServerTest, TestScanYourWrites) {
   propagated_timestamp = resp.snap_timestamp();
   // Drain all the rows from the scanner.
   vector<string> results;
-  ASSERT_NO_FATAL_FAILURE(DrainScannerToStrings(resp.scanner_id(), schema_, &results));
+  NO_FATALS(DrainScannerToStrings(resp.scanner_id(), schema_, &results));
   ASSERT_EQ(kNumRows, results.size());
   ASSERT_EQ(R"((int32 key=0, int32 int_val=0, string string_val="original0"))", results[0]);
   ASSERT_EQ(R"((int32 key=99, int32 int_val=99, string string_val="original99"))", results[99]);
@@ -2168,7 +2168,7 @@ TEST_F(TabletServerTest, TestScanYourWrites) {
   ScanYourWritesTest(propagated_timestamp, &new_resp);
   // Drain all the rows from the scanner.
   results.clear();
-  ASSERT_NO_FATAL_FAILURE(DrainScannerToStrings(new_resp.scanner_id(), schema_, &results));
+  NO_FATALS(DrainScannerToStrings(new_resp.scanner_id(), schema_, &results));
   ASSERT_EQ(kNumRows, results.size());
   ASSERT_EQ(R"((int32 key=0, int32 int_val=0, string string_val="original0"))", results[0]);
   ASSERT_EQ(R"((int32 key=99, int32 int_val=99, string string_val="original99"))", results[99]);
@@ -2246,7 +2246,7 @@ TEST_F(TabletServerTest, TestScanWithStringPredicates) {
 
   // Drain all the rows from the scanner.
   vector<string> results;
-  ASSERT_NO_FATAL_FAILURE(
+  NO_FATALS(
     DrainScannerToStrings(resp.scanner_id(), schema_, &results));
   ASSERT_EQ(10, results.size());
   ASSERT_EQ(R"((int32 key=50, int32 int_val=100, string string_val="hello 50"))", results[0]);
@@ -2356,7 +2356,7 @@ TEST_F(TabletServerTest, TestScanWithPredicates) {
 
   // Drain all the rows from the scanner.
   vector<string> results;
-  ASSERT_NO_FATAL_FAILURE(
+  NO_FATALS(
     DrainScannerToStrings(resp.scanner_id(), schema_, &results));
   ASSERT_EQ(50, results.size());
 }
@@ -2402,7 +2402,7 @@ TEST_F(TabletServerTest, TestScanWithEncodedPredicates) {
 
   // Drain all the rows from the scanner.
   vector<string> results;
-  ASSERT_NO_FATAL_FAILURE(
+  NO_FATALS(
     DrainScannerToStrings(resp.scanner_id(), schema_, &results));
   ASSERT_EQ(9, results.size());
   EXPECT_EQ(R"((int32 key=51, int32 int_val=102, string string_val="hello 51"))",
@@ -2878,7 +2878,7 @@ void TabletServerTest::DoOrderedScanTest(const Schema& projection,
   }
 
   vector<string> results;
-  ASSERT_NO_FATAL_FAILURE(
+  NO_FATALS(
     DrainScannerToStrings(resp.scanner_id(), projection, &results));
 
   ASSERT_EQ(30, results.size());
@@ -2988,14 +2988,14 @@ TEST_F(TabletServerTest, TestAlterSchema) {
   const Schema projection({ ColumnSchema("key", INT32), (ColumnSchema("c2", INT32)) }, 1);
 
   // Try recovering from the original log
-  ASSERT_NO_FATAL_FAILURE(ShutdownAndRebuildTablet());
+  NO_FATALS(ShutdownAndRebuildTablet());
   VerifyRows(projection, { KeyValue(0, 7),
                            KeyValue(1, 7),
                            KeyValue(2, 5),
                            KeyValue(3, 5) });
 
   // Try recovering from the log generated on recovery
-  ASSERT_NO_FATAL_FAILURE(ShutdownAndRebuildTablet());
+  NO_FATALS(ShutdownAndRebuildTablet());
   VerifyRows(projection, { KeyValue(0, 7),
                            KeyValue(1, 7),
                            KeyValue(2, 5),
@@ -3039,11 +3039,11 @@ TEST_F(TabletServerTest, TestAlterSchema_AddColWithoutWriteDefault) {
   VerifyRows(projection, { KeyValue(0, 7), KeyValue(1, 7) });
 
   // Try recovering from the original log
-  ASSERT_NO_FATAL_FAILURE(ShutdownAndRebuildTablet());
+  NO_FATALS(ShutdownAndRebuildTablet());
   VerifyRows(projection, { KeyValue(0, 7), KeyValue(1, 7) });
 
   // Try recovering from the log generated on recovery
-  ASSERT_NO_FATAL_FAILURE(ShutdownAndRebuildTablet());
+  NO_FATALS(ShutdownAndRebuildTablet());
   VerifyRows(projection, { KeyValue(0, 7), KeyValue(1, 7) });
 }
 
@@ -3092,9 +3092,9 @@ TEST_F(TabletServerTest, TestDeleteTablet) {
 
   // Put some data in the tablet. We flush and insert more rows to ensure that
   // there is data both in the MRS and on disk.
-  ASSERT_NO_FATAL_FAILURE(InsertTestRowsRemote(1, 1));
+  NO_FATALS(InsertTestRowsRemote(1, 1));
   ASSERT_OK(tablet_replica_->tablet()->Flush());
-  ASSERT_NO_FATAL_FAILURE(InsertTestRowsRemote(2, 1));
+  NO_FATALS(InsertTestRowsRemote(2, 1));
 
   const int block_count_after_flush = ondisk->value();
   if (FLAGS_block_manager == "log") {
@@ -3450,7 +3450,7 @@ TEST_F(TabletServerTest, TestChecksumScan) {
   ASSERT_FALSE(resp.has_more_results());
 
   // Finally, delete row 2, so we're back to the row 1 checksum.
-  ASSERT_NO_FATAL_FAILURE(DeleteTestRowsRemote(key, 1));
+  NO_FATALS(DeleteTestRowsRemote(key, 1));
   FLAGS_scanner_batch_size_rows = 100;
   req = new_req;
   controller.Reset();
@@ -3606,7 +3606,7 @@ TEST_F(TabletServerTest, TestNoMetricsForTombstonedTablet) {
   ASSERT_TRUE(mini_server_->server()->tablet_manager()->LookupTablet(kTabletId, &tablet));
 
   // Insert one row and check the insertion is recorded in the metrics.
-  ASSERT_NO_FATAL_FAILURE(InsertTestRowsRemote(0, 1, 1));
+  NO_FATALS(InsertTestRowsRemote(0, 1, 1));
   scoped_refptr<Counter> rows_inserted =
       METRIC_rows_inserted.Instantiate(tablet->tablet()->GetMetricEntity());
   int64_t num_rows_running = rows_inserted->value();
@@ -3658,7 +3658,7 @@ TEST_F(TabletServerTest, TestTabletNumberOfDiskRowSetsMetric) {
   ASSERT_EQ(0, num_diskrowsets->value());
 
   // Insert a row and flush. There should be 1 diskrowset.
-  ASSERT_NO_FATAL_FAILURE(InsertTestRowsRemote(0, 1, 1));
+  NO_FATALS(InsertTestRowsRemote(0, 1, 1));
   ASSERT_OK(tablet->tablet()->Flush());
   ASSERT_EQ(1, num_diskrowsets->value());
 }
diff --git a/src/kudu/tserver/ts_tablet_manager-test.cc b/src/kudu/tserver/ts_tablet_manager-test.cc
index 0dd569f..6e5544d 100644
--- a/src/kudu/tserver/ts_tablet_manager-test.cc
+++ b/src/kudu/tserver/ts_tablet_manager-test.cc
@@ -52,10 +52,10 @@
 #include "kudu/util/test_util.h"
 
 #define ASSERT_REPORT_HAS_UPDATED_TABLET(report, tablet_id) \
-  ASSERT_NO_FATAL_FAILURE(AssertReportHasUpdatedTablet(report, tablet_id))
+  NO_FATALS(AssertReportHasUpdatedTablet(report, tablet_id))
 
 #define ASSERT_MONOTONIC_REPORT_SEQNO(report_seqno, tablet_report) \
-  ASSERT_NO_FATAL_FAILURE(AssertMonotonicReportSeqno(report_seqno, tablet_report))
+  NO_FATALS(AssertMonotonicReportSeqno(report_seqno, tablet_report))
 
 using std::string;
 using std::vector;
diff --git a/src/kudu/util/env-test.cc b/src/kudu/util/env-test.cc
index 5bc1f0b..e668461 100644
--- a/src/kudu/util/env-test.cc
+++ b/src/kudu/util/env-test.cc
@@ -167,7 +167,7 @@ class TestEnv : public KuduTest {
     unique_ptr<uint8_t[]> scratch(new uint8_t[n]);
     Slice s(scratch.get(), n);
     ASSERT_OK(raf->Read(offset, s));
-    ASSERT_NO_FATAL_FAILURE(VerifyTestData(s, offset));
+    NO_FATALS(VerifyTestData(s, offset));
   }
 
   void TestAppendV(size_t num_slices, size_t slice_size, size_t iterations,
@@ -213,8 +213,8 @@ class TestEnv : public KuduTest {
         if (!fast) {
           // Verify as write. Note: this requires that file is pre-allocated, otherwise
           // the Read() fails with EINVAL.
-          ASSERT_NO_FATAL_FAILURE(ReadAndVerifyTestData(raf.get(), num_slices * slice_size * i,
-                                                        num_slices * slice_size));
+          NO_FATALS(ReadAndVerifyTestData(raf.get(), num_slices * slice_size * i,
+                                          num_slices * slice_size));
         }
       }
     }
@@ -226,8 +226,8 @@ class TestEnv : public KuduTest {
       ASSERT_OK(env_util::OpenFileForRandom(env_, kTestPath, &raf));
     }
     for (int i = 0; i < iterations; i++) {
-      ASSERT_NO_FATAL_FAILURE(ReadAndVerifyTestData(raf.get(), num_slices * slice_size * i,
-                                                    num_slices * slice_size));
+      NO_FATALS(ReadAndVerifyTestData(raf.get(), num_slices * slice_size * i,
+                                      num_slices * slice_size));
     }
   }
 
@@ -474,7 +474,7 @@ TEST_F(TestEnv, TestReadFully) {
   Env* env = Env::Default();
 
   WriteTestFile(env, kTestPath, kFileSize);
-  ASSERT_NO_FATAL_FAILURE();
+  NO_FATALS();
 
   // Reopen for read
   shared_ptr<RandomAccessFile> raf;
@@ -571,15 +571,15 @@ TEST_F(TestEnv, TestIOVMax) {
 TEST_F(TestEnv, TestAppendV) {
   WritableFileOptions opts;
   LOG(INFO) << "Testing AppendV() only, NO pre-allocation";
-  ASSERT_NO_FATAL_FAILURE(TestAppendV(2000, 1024, 5, true, false, opts));
+  NO_FATALS(TestAppendV(2000, 1024, 5, true, false, opts));
 
   if (!fallocate_supported_) {
     LOG(INFO) << "fallocate not supported, skipping preallocated runs";
   } else {
     LOG(INFO) << "Testing AppendV() only, WITH pre-allocation";
-    ASSERT_NO_FATAL_FAILURE(TestAppendV(2000, 1024, 5, true, true, opts));
+    NO_FATALS(TestAppendV(2000, 1024, 5, true, true, opts));
     LOG(INFO) << "Testing AppendV() together with Append() and Read(), WITH pre-allocation";
-    ASSERT_NO_FATAL_FAILURE(TestAppendV(128, 4096, 5, false, true, opts));
+    NO_FATALS(TestAppendV(128, 4096, 5, false, true, opts));
   }
 }
 
@@ -592,7 +592,7 @@ TEST_F(TestEnv, TestGetExecutablePath) {
 TEST_F(TestEnv, TestOpenEmptyRandomAccessFile) {
   Env* env = Env::Default();
   string test_file = GetTestPath("test_file");
-  ASSERT_NO_FATAL_FAILURE(WriteTestFile(env, test_file, 0));
+  NO_FATALS(WriteTestFile(env, test_file, 0));
   unique_ptr<RandomAccessFile> readable_file;
   ASSERT_OK(env->NewRandomAccessFile(test_file, &readable_file));
   uint64_t size;
diff --git a/src/kudu/util/trace-test.cc b/src/kudu/util/trace-test.cc
index ed0b577..9473a54 100644
--- a/src/kudu/util/trace-test.cc
+++ b/src/kudu/util/trace-test.cc
@@ -538,7 +538,7 @@ TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording1) {
   TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
 
   DropTracedMetadataRecords();
-  ASSERT_NO_FATAL_FAILURE();
+  NO_FATALS();
   VerifyCallbackAndRecordedEvents(2, 2);
 }