Posted to commits@pegasus.apache.org by wa...@apache.org on 2022/10/28 06:37:45 UTC

[incubator-pegasus] branch master updated: refactor(macro): use CHECK to replace dassert_f/dassert (part2) (#1211)

This is an automated email from the ASF dual-hosted git repository.

wangdan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-pegasus.git


The following commit(s) were added to refs/heads/master by this push:
     new cea109969 refactor(macro): use CHECK to replace dassert_f/dassert (part2) (#1211)
cea109969 is described below

commit cea109969e70b3472178a1594fad462a932369a6
Author: Yingchun Lai <la...@apache.org>
AuthorDate: Fri Oct 28 14:37:39 2022 +0800

    refactor(macro): use CHECK to replace dassert_f/dassert (part2) (#1211)
---
 src/common/json_helper.h                           |   2 +-
 src/failure_detector/failure_detector.cpp          |  16 +--
 .../failure_detector_multimaster.h                 |  10 +-
 src/http/http_message_parser.cpp                   |   2 +-
 src/meta/dump_file.h                               |  14 +--
 src/meta/duplication/duplication_info.h            |   2 +-
 src/meta/duplication/meta_duplication_service.h    |   4 +-
 src/meta/greedy_load_balancer.cpp                  |   2 +-
 src/meta/load_balance_policy.cpp                   |  16 +--
 src/meta/meta_backup_service.cpp                   | 107 ++++++++++-----------
 src/meta/meta_data.cpp                             |  20 ++--
 src/meta/meta_server_failure_detector.cpp          |   6 +-
 src/meta/meta_server_failure_detector.h            |  19 +---
 src/meta/meta_service.cpp                          |   4 +-
 src/meta/meta_split_service.cpp                    |  16 +--
 src/meta/meta_state_service_simple.cpp             |  17 ++--
 src/meta/meta_state_service_utils.cpp              |   4 +-
 src/meta/partition_guardian.cpp                    |  31 +++---
 src/meta/server_load_balancer.cpp                  |   9 +-
 src/meta/server_state.cpp                          |  52 +++++-----
 src/meta/server_state_restore.cpp                  |   4 +-
 .../test/balancer_simulator/balancer_simulator.cpp |   6 +-
 src/meta/test/balancer_validator.cpp               |   6 +-
 src/meta/test/meta_app_operation_test.cpp          |   4 +-
 src/meta/test/meta_state/meta_state_service.cpp    |   8 +-
 src/meta/test/misc/misc.cpp                        |   6 +-
 src/nfs/nfs_client_impl.cpp                        |   7 +-
 src/perf_counter/perf_counter_atomic.h             |  33 ++++---
 src/perf_counter/perf_counters.cpp                 |   4 +-
 src/redis_protocol/proxy_lib/redis_parser.cpp      |   2 +-
 src/replica/replica_stub.cpp                       |  27 +++---
 src/runtime/security/kinit_context.cpp             |   6 +-
 src/runtime/security/negotiation_manager.cpp       |   8 +-
 src/server/pegasus_server_impl.cpp                 |   8 +-
 src/shell/command_helper.h                         |   6 +-
 src/utils/casts.h                                  |   2 +-
 src/utils/fmt_logging.h                            |  12 +--
 37 files changed, 240 insertions(+), 262 deletions(-)
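
Every hunk below follows the same mechanical pattern: printf-style dassert/dassert_f assertions become the fmt-based CHECK family from src/utils/fmt_logging.h (hence that header being added to many include lists), "%s"/"%d"/"%lld" conversions become "{}" placeholders, and most ".c_str()" calls on the arguments are dropped. The real macro definitions live in fmt_logging.h, whose hunk is not part of this excerpt; the following is only a minimal sketch of the shape such macros might take, assuming an abort-on-failure policy and fmt::format for message rendering.

    // Minimal sketch only (NOT the real src/utils/fmt_logging.h): evaluate the
    // predicate, render the message lazily with fmt, and abort on failure.
    #include <cstdio>
    #include <cstdlib>
    #include <string>
    #include <fmt/format.h>

    #define CHECK(pred, ...)                                                   \
        do {                                                                   \
            if (!(pred)) {                                                     \
                std::string _msg = fmt::format(__VA_ARGS__);                   \
                std::fprintf(                                                  \
                    stderr, "assertion '%s' failed: %s\n", #pred, _msg.c_str());\
                std::abort();                                                  \
            }                                                                  \
        } while (0)

    // Pointer-specific variant used in several hunks (e.g. meta_duplication_service.h).
    #define CHECK_NOTNULL(ptr, ...) CHECK((ptr) != nullptr, __VA_ARGS__)

With that shape, a call such as dassert(ns != nullptr, "can't find node(%s) from node_state", node.to_string()) maps to CHECK_NOTNULL(ns, "can't find node({}) from node_state", node.to_string()), which is exactly the rewrite seen in load_balance_policy.cpp below.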

diff --git a/src/common/json_helper.h b/src/common/json_helper.h
index 8706512d3..e496ed0f7 100644
--- a/src/common/json_helper.h
+++ b/src/common/json_helper.h
@@ -506,7 +506,7 @@ inline void json_encode(JsonWriter &out, const dsn::ref_ptr<T> &t)
 {
     // when a smart ptr is encoded, caller should ensure the ptr is not nullptr
     // TODO: encoded to null?
-    dassert_f(t.get(), "");
+    CHECK(t.get(), "");
     json_encode(out, *t);
 }
 
diff --git a/src/failure_detector/failure_detector.cpp b/src/failure_detector/failure_detector.cpp
index e03db236d..7c828fa72 100644
--- a/src/failure_detector/failure_detector.cpp
+++ b/src/failure_detector/failure_detector.cpp
@@ -24,20 +24,14 @@
  * THE SOFTWARE.
  */
 
-/*
- * Description:
- *     What is this file about?
- *
- * Revision history:
- *     xxxx-xx-xx, author, first version
- *     xxxx-xx-xx, author, fix bug about xxx
- */
-
 #include "failure_detector/failure_detector.h"
-#include "utils/command_manager.h"
+
 #include <chrono>
 #include <ctime>
 
+#include "utils/command_manager.h"
+#include "utils/fmt_logging.h"
+
 namespace dsn {
 namespace fd {
 
@@ -320,7 +314,7 @@ bool failure_detector::remove_from_allow_list(::dsn::rpc_address node)
 
 void failure_detector::set_allow_list(const std::vector<std::string> &replica_addrs)
 {
-    dassert(!_is_started, "FD is already started, the allow list should really not be modified");
+    CHECK(!_is_started, "FD is already started, the allow list should really not be modified");
 
     std::vector<rpc_address> nodes;
     for (auto &addr : replica_addrs) {
diff --git a/src/failure_detector/failure_detector_multimaster.h b/src/failure_detector/failure_detector_multimaster.h
index d384e153a..b804ee108 100644
--- a/src/failure_detector/failure_detector_multimaster.h
+++ b/src/failure_detector/failure_detector_multimaster.h
@@ -26,10 +26,12 @@
 
 #pragma once
 
+#include <functional>
+
+#include "failure_detector/failure_detector.h"
 #include "runtime/rpc/group_address.h"
+#include "utils/fmt_logging.h"
 #include "utils/zlocks.h"
-#include "failure_detector/failure_detector.h"
-#include <functional>
 
 namespace dsn {
 namespace dist {
@@ -51,11 +53,11 @@ public:
     // server side
     void on_worker_disconnected(const std::vector<::dsn::rpc_address> &nodes) override
     {
-        dassert(false, "invalid execution flow");
+        CHECK(false, "invalid execution flow");
     }
     void on_worker_connected(::dsn::rpc_address node) override
     {
-        dassert(false, "invalid execution flow");
+        CHECK(false, "invalid execution flow");
     }
 
     ::dsn::rpc_address current_server_contact() const;
diff --git a/src/http/http_message_parser.cpp b/src/http/http_message_parser.cpp
index 5b1f508ff..c20b77973 100644
--- a/src/http/http_message_parser.cpp
+++ b/src/http/http_message_parser.cpp
@@ -209,7 +209,7 @@ void http_message_parser::prepare_on_send(message_ex *msg)
     const message_header *header = msg->header;
     std::vector<blob> &buffers = msg->buffers;
 
-    dassert(!header->context.u.is_request, "send response only");
+    CHECK(!header->context.u.is_request, "send response only");
 
     unsigned int dsn_size = sizeof(message_header) + header->body_length;
     int dsn_buf_count = 0;
diff --git a/src/meta/dump_file.h b/src/meta/dump_file.h
index c136470e5..edaef35d6 100644
--- a/src/meta/dump_file.h
+++ b/src/meta/dump_file.h
@@ -24,15 +24,6 @@
  * THE SOFTWARE.
  */
 
-/*
- * Description:
- *     A simple dump file implementation for meta server, which can be used to dump meta's
- * server-state
- *
- * Revision history:
- *     2015-12-10, Weijie Sun(sunweijie at xiaomi.com), first version
- *     xxxx-xx-xx, author, fix bug about xxx
- */
 #pragma once
 
 #include "utils/safe_strerror_posix.h"
@@ -54,6 +45,7 @@
 #include "runtime/rpc/rpc_stream.h"
 #include "runtime/serverlet.h"
 #include "runtime/service_app.h"
+#include "utils/fmt_logging.h"
 #include "utils/rpc_address.h"
 #include "utils/crc.h"
 #include <cstdio>
@@ -97,7 +89,7 @@ public:
     {
         static __thread char msg_buffer[128];
 
-        dassert(_is_write, "call append when open file with read mode");
+        CHECK(_is_write, "call append when open file with read mode");
 
         block_header hdr = {data_length, 0};
         hdr.crc32 = dsn::utils::crc32_calc(data, data_length, _crc);
@@ -122,7 +114,7 @@ public:
     int read_next_buffer(/*out*/ dsn::blob &output)
     {
         static __thread char msg_buffer[128];
-        dassert(!_is_write, "call read next buffer when open file with write mode");
+        CHECK(!_is_write, "call read next buffer when open file with write mode");
 
         block_header hdr;
         size_t len = fread(&hdr, sizeof(hdr), 1, _file_handle);
diff --git a/src/meta/duplication/duplication_info.h b/src/meta/duplication/duplication_info.h
index e4d245036..c2da4dffa 100644
--- a/src/meta/duplication/duplication_info.h
+++ b/src/meta/duplication/duplication_info.h
@@ -253,7 +253,7 @@ extern bool json_decode(const dsn::json::JsonObject &in, duplication_fail_mode::
 
 // TODO(yingchun): remember to update it when refactor dassert_f
 #define dassert_dup(_pred_, _dup_, ...)                                                            \
-    dassert_f(_pred_, "[a{}d{}] {}", _dup_->app_id, _dup_->id, fmt::format(__VA_ARGS__));
+    CHECK(_pred_, "[a{}d{}] {}", _dup_->app_id, _dup_->id, fmt::format(__VA_ARGS__));
 
 } // namespace replication
 } // namespace dsn
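
The dassert_dup change above illustrates layering a domain-specific assertion on top of CHECK: the caller's own format string and arguments are pre-rendered with fmt::format(__VA_ARGS__) and spliced into a fixed "[a{}d{}]" prefix carrying the app id and duplication id. A self-contained sketch of that layering, using a hypothetical stand-in struct since duplication_info itself is not shown in this excerpt (and assuming the CHECK sketch near the top of the diff):

    // Hypothetical stand-in for duplication_info: just the two fields the
    // "[a{}d{}]" prefix needs.
    struct fake_dup
    {
        int app_id = 2;
        int id = 7;
    };

    #define dassert_dup(_pred_, _dup_, ...)                                    \
        CHECK(_pred_, "[a{}d{}] {}", _dup_->app_id, _dup_->id,                 \
              fmt::format(__VA_ARGS__))

    void validate_progress(const fake_dup *dup, int progress)
    {
        // On failure this would render roughly "[a2d7] bad progress 120"
        // before aborting.
        dassert_dup(progress <= 100, dup, "bad progress {}", progress);
    }
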
diff --git a/src/meta/duplication/meta_duplication_service.h b/src/meta/duplication/meta_duplication_service.h
index e978264a6..d933ac447 100644
--- a/src/meta/duplication/meta_duplication_service.h
+++ b/src/meta/duplication/meta_duplication_service.h
@@ -43,8 +43,8 @@ class meta_duplication_service
 public:
     meta_duplication_service(server_state *ss, meta_service *ms) : _state(ss), _meta_svc(ms)
     {
-        dassert(_state, "_state should not be null");
-        dassert(_meta_svc, "_meta_svc should not be null");
+        CHECK_NOTNULL(_state, "_state should not be null");
+        CHECK_NOTNULL(_meta_svc, "_meta_svc should not be null");
     }
 
     /// See replication.thrift for possible errors for each rpc.
diff --git a/src/meta/greedy_load_balancer.cpp b/src/meta/greedy_load_balancer.cpp
index 3899eb333..fd6e85696 100644
--- a/src/meta/greedy_load_balancer.cpp
+++ b/src/meta/greedy_load_balancer.cpp
@@ -240,7 +240,7 @@ void greedy_load_balancer::report(const dsn::replication::migration_list &list,
             counters[COPY_SEC_COUNT]++;
             break;
         default:
-            dassert(false, "");
+            CHECK(false, "");
         }
     }
     ::memcpy(t_operation_counters, counters, sizeof(counters));
diff --git a/src/meta/load_balance_policy.cpp b/src/meta/load_balance_policy.cpp
index c805528c3..a7bce5db8 100644
--- a/src/meta/load_balance_policy.cpp
+++ b/src/meta/load_balance_policy.cpp
@@ -53,7 +53,7 @@ bool calc_disk_load(node_mapper &nodes,
 {
     load.clear();
     const node_state *ns = get_node_state(nodes, node, false);
-    dassert(ns != nullptr, "can't find node(%s) from node_state", node.to_string());
+    CHECK_NOTNULL(ns, "can't find node({}) from node_state", node.to_string());
 
     auto add_one_replica_to_disk_load = [&](const gpid &pid) {
         LOG_DEBUG("add gpid(%d.%d) to node(%s) disk load",
@@ -159,7 +159,7 @@ generate_balancer_request(const app_mapper &apps,
             new_proposal_action(pc.primary, from, config_type::CT_REMOVE));
         break;
     default:
-        dassert(false, "");
+        CHECK(false, "");
     }
     LOG_INFO("generate balancer: %d.%d %s from %s of disk_tag(%s) to %s",
              pc.pid.get_app_id(),
@@ -293,7 +293,7 @@ void load_balance_policy::start_moving_primary(const std::shared_ptr<app_state>
             selected,
             generate_balancer_request(
                 *_global_view->apps, pc, balance_type::MOVE_PRIMARY, from, to));
-        dassert_f(balancer_result.second, "gpid({}) already inserted as an action", selected);
+        CHECK(balancer_result.second, "gpid({}) already inserted as an action", selected);
 
         --(*prev_load)[get_disk_tag(*_global_view->apps, from, selected)];
         ++(*current_load)[get_disk_tag(*_global_view->apps, to, selected)];
@@ -479,8 +479,8 @@ void load_balance_policy::number_nodes(const node_mapper &nodes)
     address_id.clear();
     address_vec.resize(_alive_nodes + 2);
     for (auto iter = nodes.begin(); iter != nodes.end(); ++iter) {
-        dassert(!iter->first.is_invalid() && !iter->second.addr().is_invalid(), "invalid address");
-        dassert(iter->second.alive(), "dead node");
+        CHECK(!iter->first.is_invalid() && !iter->second.addr().is_invalid(), "invalid address");
+        CHECK(iter->second.alive(), "dead node");
 
         address_id[iter->first] = current_id;
         address_vec[current_id] = iter->first;
@@ -738,9 +738,9 @@ gpid copy_replica_operation::select_partition(migration_list *result)
 
     int id_max = *_ordered_address_ids.rbegin();
     const node_state &ns = _nodes.find(_address_vec[id_max])->second;
-    dassert_f(partitions != nullptr && !partitions->empty(),
-              "max load({}) shouldn't empty",
-              ns.addr().to_string());
+    CHECK(partitions != nullptr && !partitions->empty(),
+          "max load({}) shouldn't empty",
+          ns.addr().to_string());
 
     return select_max_load_gpid(partitions, result);
 }
diff --git a/src/meta/meta_backup_service.cpp b/src/meta/meta_backup_service.cpp
index 4113d9758..e8ea2bb61 100644
--- a/src/meta/meta_backup_service.cpp
+++ b/src/meta/meta_backup_service.cpp
@@ -108,10 +108,10 @@ void policy_context::start_backup_app_meta_unlocked(int32_t app_id)
                          _backup_service->backup_option().block_retry_delay_ms);
         return;
     }
-    dassert(remote_file != nullptr,
-            "%s: create file(%s) succeed, but can't get handle",
-            _backup_sig.c_str(),
-            create_file_req.file_name.c_str());
+    CHECK_NOTNULL(remote_file,
+                  "{}: create file({}) succeed, but can't get handle",
+                  _backup_sig,
+                  create_file_req.file_name);
 
     remote_file->write(
         dist::block_service::write_request{buffer},
@@ -215,10 +215,10 @@ void policy_context::write_backup_app_finish_flag_unlocked(int32_t app_id,
         return;
     }
 
-    dassert(remote_file != nullptr,
-            "%s: create file(%s) succeed, but can't get handle",
-            _backup_sig.c_str(),
-            create_file_req.file_name.c_str());
+    CHECK_NOTNULL(remote_file,
+                  "{}: create file({}) succeed, but can't get handle",
+                  _backup_sig,
+                  create_file_req.file_name);
     if (remote_file->get_size() > 0) {
         // we only focus whether app_backup_status file is exist, so ignore app_backup_status file's
         // context
@@ -281,10 +281,10 @@ void policy_context::finish_backup_app_unlocked(int32_t app_id)
                     tasking::create_task(LPC_DEFAULT_CALLBACK, &_tracker, [this]() {
                         zauto_lock l(_lock);
                         auto iter = _backup_history.emplace(_cur_backup.backup_id, _cur_backup);
-                        dassert(iter.second,
-                                "%s: backup_id(%lld) already in the backup_history",
-                                _policy.policy_name.c_str(),
-                                _cur_backup.backup_id);
+                        CHECK(iter.second,
+                              "{}: backup_id({}) already in the backup_history",
+                              _policy.policy_name,
+                              _cur_backup.backup_id);
                         _cur_backup.start_time_ms = 0;
                         _cur_backup.end_time_ms = 0;
                         LOG_INFO("%s: finish an old backup, try to start a new one",
@@ -332,10 +332,10 @@ void policy_context::write_backup_info_unlocked(const backup_info &b_info,
         return;
     }
 
-    dassert(remote_file != nullptr,
-            "%s: create file(%s) succeed, but can't get handle",
-            _backup_sig.c_str(),
-            create_file_req.file_name.c_str());
+    CHECK_NOTNULL(remote_file,
+                  "{}: create file({}) succeed, but can't get handle",
+                  _backup_sig,
+                  create_file_req.file_name);
 
     blob buf = dsn::json::json_forwarder<backup_info>::encode(b_info);
 
@@ -644,10 +644,10 @@ void policy_context::sync_backup_to_remote_storage_unlocked(const backup_info &b
                              0,
                              _backup_service->backup_option().meta_retry_delay_ms);
         } else {
-            dassert(false,
-                    "%s: we can't handle this right now, error(%s)",
-                    _backup_sig.c_str(),
-                    err.to_string());
+            CHECK(false,
+                  "{}: we can't handle this right now, error({})",
+                  _backup_sig,
+                  err.to_string());
         }
     };
 
@@ -835,11 +835,11 @@ void policy_context::add_backup_history(const backup_info &info)
                 _policy.policy_name.c_str(),
                 _cur_backup.backup_id,
                 info.backup_id);
-        dassert(_backup_history.empty() || info.backup_id > _backup_history.rbegin()->first,
-                "%s: backup_id(%lld) in history larger than current(%lld)",
-                _policy.policy_name.c_str(),
-                _backup_history.rbegin()->first,
-                info.backup_id);
+        CHECK(_backup_history.empty() || info.backup_id > _backup_history.rbegin()->first,
+              "{}: backup_id({}) in history larger than current({})",
+              _policy.policy_name,
+              _backup_history.rbegin()->first,
+              info.backup_id);
         _cur_backup = info;
         initialize_backup_progress_unlocked();
         _backup_sig =
@@ -850,17 +850,15 @@ void policy_context::add_backup_history(const backup_info &info)
                  info.backup_id,
                  info.start_time_ms,
                  info.end_time_ms);
-        dassert(_cur_backup.end_time_ms == 0 || info.backup_id < _cur_backup.backup_id,
-                "%s: backup_id(%lld) in history larger than current(%lld)",
-                _policy.policy_name.c_str(),
-                info.backup_id,
-                _cur_backup.backup_id);
+        CHECK(_cur_backup.end_time_ms == 0 || info.backup_id < _cur_backup.backup_id,
+              "{}: backup_id({}) in history larger than current({})",
+              _policy.policy_name,
+              info.backup_id,
+              _cur_backup.backup_id);
 
         auto result_pair = _backup_history.emplace(info.backup_id, info);
-        dassert(result_pair.second,
-                "%s: conflict backup id(%lld)",
-                _policy.policy_name.c_str(),
-                info.backup_id);
+        CHECK(
+            result_pair.second, "{}: conflict backup id({})", _policy.policy_name, info.backup_id);
     }
 }
 
@@ -902,9 +900,9 @@ void policy_context::set_policy(const policy &p)
                              ->get_block_service_manager()
                              .get_or_create_block_filesystem(_policy.backup_provider_type);
     }
-    dassert(_block_service,
-            "can't initialize block filesystem by provider (%s)",
-            _policy.backup_provider_type.c_str());
+    CHECK(_block_service,
+          "can't initialize block filesystem by provider ({})",
+          _policy.backup_provider_type);
 }
 
 policy policy_context::get_policy()
@@ -1021,10 +1019,10 @@ void policy_context::sync_remove_backup_info(const backup_info &info, dsn::task_
                 0,
                 _backup_service->backup_option().meta_retry_delay_ms);
         } else {
-            dassert(false,
-                    "%s: we can't handle this right now, error(%s)",
-                    _policy.policy_name.c_str(),
-                    err.to_string());
+            CHECK(false,
+                  "{}: we can't handle this right now, error({})",
+                  _policy.policy_name,
+                  err.to_string());
         }
     };
 
@@ -1073,7 +1071,7 @@ void backup_service::start_create_policy_meta_root(dsn::task_ptr callback)
                     0,
                     _opt.meta_retry_delay_ms);
             } else {
-                dassert(false, "we can't handle this error(%s) right now", err.to_string());
+                CHECK(false, "we can't handle this error({}) right now", err.to_string());
             }
         });
 }
@@ -1102,9 +1100,9 @@ void backup_service::start_sync_policies()
                               0,
                               _opt.meta_retry_delay_ms);
     } else {
-        dassert(false,
-                "sync policies from remote storage encounter error(%s), we can't handle "
-                "this right now");
+        CHECK(false,
+              "sync policies from remote storage encounter error({}), we can't handle "
+              "this right now");
     }
 }
 
@@ -1128,9 +1126,8 @@ error_code backup_service::sync_policies_from_remote_storage()
                     zauto_lock l(_lock);
                     auto it = _policy_states.find(policy_name);
                     if (it == _policy_states.end()) {
-                        dassert(false,
-                                "before initializing the backup_info, initialize the policy first");
-                        return;
+                        CHECK(false,
+                              "before initializing the backup_info, initialize the policy first");
                     }
                     ptr = it->second.get();
                 }
@@ -1303,7 +1300,7 @@ void backup_service::add_backup_policy(dsn::message_ex *msg)
 
     LOG_INFO_F("start to add backup polciy {}.", request.policy_name);
     std::shared_ptr<policy_context> policy_context_ptr = _factory(this);
-    dassert(policy_context_ptr != nullptr, "invalid policy_context");
+    CHECK_NOTNULL(policy_context_ptr, "invalid policy_context");
     policy p;
     p.policy_name = request.policy_name;
     p.backup_provider_type = request.backup_provider_type;
@@ -1352,9 +1349,9 @@ void backup_service::do_add_policy(dsn::message_ex *req,
                                  _opt.meta_retry_delay_ms);
                 return;
             } else {
-                dassert(false,
-                        "we can't handle this when create backup policy, err(%s)",
-                        err.to_string());
+                CHECK(false,
+                      "we can't handle this when create backup policy, err({})",
+                      err.to_string());
             }
         },
         value);
@@ -1390,9 +1387,9 @@ void backup_service::do_update_policy_to_remote_storage(
                                  0,
                                  _opt.meta_retry_delay_ms);
             } else {
-                dassert(false,
-                        "we can't handle this when create backup policy, err(%s)",
-                        err.to_string());
+                CHECK(false,
+                      "we can't handle this when create backup policy, err({})",
+                      err.to_string());
             }
         });
 }
diff --git a/src/meta/meta_data.cpp b/src/meta/meta_data.cpp
index f0853d6cf..ddff68ef3 100644
--- a/src/meta/meta_data.cpp
+++ b/src/meta/meta_data.cpp
@@ -174,10 +174,10 @@ bool construct_replica(meta_view view, const gpid &pid, int max_replica_count)
     // we put max_replica_count-1 recent replicas to last_drops, in case of the DDD-state when the
     // only primary dead
     // when add node to pc.last_drops, we don't remove it from our cc.drop_list
-    dassert(pc.last_drops.empty(),
-            "last_drops of partition(%d.%d) must be empty",
-            pid.get_app_id(),
-            pid.get_partition_index());
+    CHECK(pc.last_drops.empty(),
+          "last_drops of partition({}.{}) must be empty",
+          pid.get_app_id(),
+          pid.get_partition_index());
     for (auto iter = drop_list.rbegin(); iter != drop_list.rend(); ++iter) {
         if (pc.last_drops.size() + 1 >= max_replica_count)
             break;
@@ -215,7 +215,7 @@ bool collect_replica(meta_view view, const rpc_address &node, const replica_info
 
     // adjust the drop list
     int ans = cc.collect_drop_replica(node, info);
-    dassert(cc.check_order(), "");
+    CHECK(cc.check_order(), "");
 
     return info.status == partition_status::PS_POTENTIAL_SECONDARY || ans != -1;
 }
@@ -421,11 +421,11 @@ int config_context::collect_drop_replica(const rpc_address &node, const replica_
 
     iter = find_from_dropped(node);
     if (iter == dropped.end()) {
-        dassert(!in_dropped,
-                "adjust position of existing node(%s) failed, this is a bug, partition(%d.%d)",
-                node.to_string(),
-                config_owner->pid.get_app_id(),
-                config_owner->pid.get_partition_index());
+        CHECK(!in_dropped,
+              "adjust position of existing node({}) failed, this is a bug, partition({}.{})",
+              node.to_string(),
+              config_owner->pid.get_app_id(),
+              config_owner->pid.get_partition_index());
         return -1;
     }
     return in_dropped ? 1 : 0;
diff --git a/src/meta/meta_server_failure_detector.cpp b/src/meta/meta_server_failure_detector.cpp
index 72fbd97aa..1fc0e7fc0 100644
--- a/src/meta/meta_server_failure_detector.cpp
+++ b/src/meta/meta_server_failure_detector.cpp
@@ -197,9 +197,9 @@ void meta_server_failure_detector::reset_stability_stat(const rpc_address &node)
 void meta_server_failure_detector::leader_initialize(const std::string &lock_service_owner)
 {
     dsn::rpc_address addr;
-    dassert(addr.from_string_ipv4(lock_service_owner.c_str()),
-            "parse %s to rpc_address failed",
-            lock_service_owner.c_str());
+    CHECK(addr.from_string_ipv4(lock_service_owner.c_str()),
+          "parse {} to rpc_address failed",
+          lock_service_owner);
     dassert(addr == dsn_primary_address(),
             "acquire leader return success, but owner not match: %s vs %s",
             addr.to_string(),
diff --git a/src/meta/meta_server_failure_detector.h b/src/meta/meta_server_failure_detector.h
index 6dea2dec4..a7ec52a5b 100644
--- a/src/meta/meta_server_failure_detector.h
+++ b/src/meta/meta_server_failure_detector.h
@@ -24,22 +24,13 @@
  * THE SOFTWARE.
  */
 
-/*
- * Description:
- *     What is this file about?
- *
- * Revision history:
- *     xxxx-xx-xx, author, first version
- *     xxxx-xx-xx, author, fix bug about xxx
- */
-
 #pragma once
 
-#include "failure_detector/failure_detector.h"
-#include "utils/distributed_lock_service.h"
-
 #include "common/replication_common.h"
+#include "failure_detector/failure_detector.h"
 #include "meta_options.h"
+#include "utils/distributed_lock_service.h"
+#include "utils/fmt_logging.h"
 
 namespace dsn {
 namespace replication {
@@ -86,9 +77,9 @@ public:
     // client side
     virtual void on_master_disconnected(const std::vector<rpc_address> &)
     {
-        dassert(false, "unsupported method");
+        CHECK(false, "unsupported method");
     }
-    virtual void on_master_connected(rpc_address) { dassert(false, "unsupported method"); }
+    virtual void on_master_connected(rpc_address) { CHECK(false, "unsupported method"); }
 
     // server side
     // it is in the protection of failure_detector::_lock
diff --git a/src/meta/meta_service.cpp b/src/meta/meta_service.cpp
index ffdf440eb..58f011f86 100644
--- a/src/meta/meta_service.cpp
+++ b/src/meta/meta_service.cpp
@@ -303,7 +303,7 @@ void meta_service::start_service()
 // the start function is executed in threadpool default
 error_code meta_service::start()
 {
-    dassert(!_started, "meta service is already started");
+    CHECK(!_started, "meta service is already started");
     register_ctrl_commands();
 
     error_code err;
@@ -337,7 +337,7 @@ error_code meta_service::start()
     dist::cmd::register_remote_command_rpc();
 
     _failure_detector->acquire_leader_lock();
-    dassert(_failure_detector->get_leader(nullptr), "must be primary at this point");
+    CHECK(_failure_detector->get_leader(nullptr), "must be primary at this point");
     LOG_INFO("%s got the primary lock, start to recover server state from remote storage",
              dsn_primary_address().to_string());
 
diff --git a/src/meta/meta_split_service.cpp b/src/meta/meta_split_service.cpp
index f42f2ec09..13936a6c8 100644
--- a/src/meta/meta_split_service.cpp
+++ b/src/meta/meta_split_service.cpp
@@ -468,12 +468,12 @@ void meta_split_service::notify_stop_split(notify_stop_split_rpc rpc)
     auto &response = rpc.response();
     zauto_write_lock l(app_lock());
     std::shared_ptr<app_state> app = _state->get_app(request.app_name);
-    dassert_f(app != nullptr, "app({}) is not existed", request.app_name);
-    dassert_f(app->is_stateful, "app({}) is stateless currently", request.app_name);
-    dassert_f(request.meta_split_status == split_status::PAUSING ||
-                  request.meta_split_status == split_status::CANCELING,
-              "invalid split_status({})",
-              dsn::enum_to_string(request.meta_split_status));
+    CHECK(app, "app({}) is not existed", request.app_name);
+    CHECK(app->is_stateful, "app({}) is stateless currently", request.app_name);
+    CHECK(request.meta_split_status == split_status::PAUSING ||
+              request.meta_split_status == split_status::CANCELING,
+          "invalid split_status({})",
+          dsn::enum_to_string(request.meta_split_status));
 
     const std::string &stop_type =
         rpc.request().meta_split_status == split_status::PAUSING ? "pause" : "cancel";
@@ -554,8 +554,8 @@ void meta_split_service::query_child_state(query_child_state_rpc rpc)
 
     zauto_read_lock l(app_lock());
     std::shared_ptr<app_state> app = _state->get_app(app_name);
-    dassert_f(app != nullptr, "app({}) is not existed", app_name);
-    dassert_f(app->is_stateful, "app({}) is stateless currently", app_name);
+    CHECK(app, "app({}) is not existed", app_name);
+    CHECK(app->is_stateful, "app({}) is stateless currently", app_name);
 
     if (app->partition_count == request.partition_count) {
         response.err = ERR_INVALID_STATE;
diff --git a/src/meta/meta_state_service_simple.cpp b/src/meta/meta_state_service_simple.cpp
index e74a21163..21a8771dc 100644
--- a/src/meta/meta_state_service_simple.cpp
+++ b/src/meta/meta_state_service_simple.cpp
@@ -31,9 +31,10 @@
 #include <stack>
 #include <utility>
 
-#include "runtime/task/task.h"
 #include "runtime/task/async_calls.h"
+#include "runtime/task/task.h"
 #include "utils/filesystem.h"
+#include "utils/fmt_logging.h"
 
 namespace dsn {
 namespace dist {
@@ -78,7 +79,7 @@ void meta_state_service_simple::write_log(blob &&log_blob,
     uint64_t log_offset = _offset;
     _offset += log_blob.length();
     auto continuation_task = std::unique_ptr<operation>(new operation(false, [=](bool log_succeed) {
-        dassert(log_succeed, "we cannot handle logging failure now");
+        CHECK(log_succeed, "we cannot handle logging failure now");
         __err_cb_bind_and_enqueue(task, internal_operation(), 0);
     }));
     auto continuation_task_ptr = continuation_task.get();
@@ -92,8 +93,8 @@ void meta_state_service_simple::write_log(blob &&log_blob,
                 LPC_META_STATE_SERVICE_SIMPLE_INTERNAL,
                 &_tracker,
                 [=](error_code err, size_t bytes) {
-                    dassert(err == ERR_OK && bytes == log_blob.length(),
-                            "we cannot handle logging failure now");
+                    CHECK(err == ERR_OK && bytes == log_blob.length(),
+                          "we cannot handle logging failure now");
                     _log_lock.lock();
                     continuation_task_ptr->done = true;
                     while (!_task_queue.empty()) {
@@ -200,7 +201,7 @@ error_code meta_state_service_simple::apply_transaction(
     LOG_DEBUG("internal operation after logged");
     simple_transaction_entries *entries =
         dynamic_cast<simple_transaction_entries *>(t_entries.get());
-    dassert(entries != nullptr, "invalid input parameter");
+    CHECK_NOTNULL(entries, "invalid input parameter");
     error_code ec;
     for (int i = 0; i != entries->_offset; ++i) {
         operation_entry &e = entries->_ops[i];
@@ -215,7 +216,7 @@ error_code meta_state_service_simple::apply_transaction(
             ec = set_data_internal(e._node, e._value);
             break;
         default:
-            dassert(false, "unsupported operation");
+            CHECK(false, "unsupported operation");
         }
         dassert(ec == ERR_OK, "unexpected error when applying, err=%s", ec.to_string());
     }
@@ -274,7 +275,7 @@ error_code meta_state_service_simple::initialize(const std::vector<std::string>
                 default:
                     // The log is complete but its content is modified by cosmic ray. This is
                     // unacceptable
-                    dassert(false, "meta state server log corrupted");
+                    CHECK(false, "meta state server log corrupted");
                 }
             }
             fclose(fd);
@@ -370,7 +371,7 @@ task_ptr meta_state_service_simple::submit_transaction(
             }
         } break;
         default:
-            dassert(false, "not supported operation");
+            CHECK(false, "not supported operation");
             break;
         }
 
diff --git a/src/meta/meta_state_service_utils.cpp b/src/meta/meta_state_service_utils.cpp
index c6e56acaa..521de9236 100644
--- a/src/meta/meta_state_service_utils.cpp
+++ b/src/meta/meta_state_service_utils.cpp
@@ -44,7 +44,7 @@ namespace mss {
 meta_storage::meta_storage(dist::meta_state_service *remote_storage, task_tracker *tracker)
     : _remote(remote_storage), _tracker(tracker)
 {
-    dassert(tracker != nullptr, "must set task tracker");
+    CHECK_NOTNULL(tracker, "must set task tracker");
 }
 
 meta_storage::~meta_storage() = default;
@@ -53,7 +53,7 @@ void meta_storage::create_node_recursively(std::queue<std::string> &&nodes,
                                            blob &&value,
                                            std::function<void()> &&cb)
 {
-    dassert(!nodes.empty(), "");
+    CHECK(!nodes.empty(), "");
 
     on_create_recursively op;
     op.initialize(this);
diff --git a/src/meta/partition_guardian.cpp b/src/meta/partition_guardian.cpp
index f0c7db805..0f5cf1349 100644
--- a/src/meta/partition_guardian.cpp
+++ b/src/meta/partition_guardian.cpp
@@ -50,8 +50,8 @@ pc_status partition_guardian::cure(meta_view view,
     const partition_configuration &pc = *get_config(*(view.apps), gpid);
     const proposal_actions &acts = get_config_context(*view.apps, gpid)->lb_actions;
 
-    dassert(app->is_stateful, "");
-    dassert(acts.empty(), "");
+    CHECK(app->is_stateful, "");
+    CHECK(acts.empty(), "");
 
     pc_status status;
     if (pc.primary.is_invalid())
@@ -79,10 +79,10 @@ void partition_guardian::reconfig(meta_view view, const configuration_update_req
     config_context *cc = get_config_context(*(view.apps), gpid);
     if (!cc->lb_actions.empty()) {
         const configuration_proposal_action *current = cc->lb_actions.front();
-        dassert(current != nullptr && current->type != config_type::CT_INVALID,
-                "invalid proposal for gpid(%d.%d)",
-                gpid.get_app_id(),
-                gpid.get_partition_index());
+        CHECK(current != nullptr && current->type != config_type::CT_INVALID,
+              "invalid proposal for gpid({}.{})",
+              gpid.get_app_id(),
+              gpid.get_partition_index());
         // if the valid proposal is from cure
         if (!cc->lb_actions.is_from_balancer()) {
             finish_cure_proposal(view, gpid, *current);
@@ -108,9 +108,9 @@ void partition_guardian::reconfig(meta_view view, const configuration_update_req
             } else {
                 cc->remove_from_serving(request.node);
 
-                dassert(cc->record_drop_history(request.node),
-                        "node(%s) has been in the dropped",
-                        request.node.to_string());
+                CHECK(cc->record_drop_history(request.node),
+                      "node({}) has been in the dropped",
+                      request.node.to_string());
             }
         });
     }
@@ -220,9 +220,8 @@ pc_status partition_guardian::on_missing_primary(meta_view &view, const dsn::gpi
 
         for (int i = 0; i < pc.secondaries.size(); ++i) {
             node_state *ns = get_node_state(*(view.nodes), pc.secondaries[i], false);
-            dassert(ns != nullptr,
-                    "invalid secondary address, address = %s",
-                    pc.secondaries[i].to_string());
+            CHECK_NOTNULL(
+                ns, "invalid secondary address, address = {}", pc.secondaries[i].to_string());
             if (!ns->alive())
                 continue;
 
@@ -588,9 +587,9 @@ pc_status partition_guardian::on_missing_secondary(meta_view &view, const dsn::g
         // if not emergency, only try to recover last dropped server
         const dropped_replica &server = cc.dropped.back();
         if (is_node_alive(*view.nodes, server.node)) {
-            dassert(!server.node.is_invalid(),
-                    "invalid server address, address = %s",
-                    server.node.to_string());
+            CHECK(!server.node.is_invalid(),
+                  "invalid server address, address = {}",
+                  server.node.to_string());
             action.node = server.node;
         }
 
@@ -612,7 +611,7 @@ pc_status partition_guardian::on_missing_secondary(meta_view &view, const dsn::g
         action.target = pc.primary;
 
         newly_partitions *np = get_newly_partitions(*(view.nodes), action.node);
-        dassert(np != nullptr, "");
+        CHECK_NOTNULL(np, "");
         np->newly_add_partition(gpid.get_app_id());
 
         cc.lb_actions.assign_cure_proposal(action);
diff --git a/src/meta/server_load_balancer.cpp b/src/meta/server_load_balancer.cpp
index c5f21b84b..05ca80081 100644
--- a/src/meta/server_load_balancer.cpp
+++ b/src/meta/server_load_balancer.cpp
@@ -188,12 +188,13 @@ void server_load_balancer::apply_balancer(meta_view view, const migration_list &
         configuration_balancer_response resp;
         for (auto &pairs : ml) {
             register_proposals(view, *pairs.second, resp);
+            // TODO(yingchun): use dcheck_eq instead
             if (resp.err != dsn::ERR_OK) {
                 const dsn::gpid &pid = pairs.first;
-                dassert(false,
-                        "apply balancer for gpid(%d.%d) failed",
-                        pid.get_app_id(),
-                        pid.get_partition_index());
+                CHECK(false,
+                      "apply balancer for gpid({}.{}) failed",
+                      pid.get_app_id(),
+                      pid.get_partition_index());
             }
         }
     }
diff --git a/src/meta/server_state.cpp b/src/meta/server_state.cpp
index 7621b7fc6..607901ce8 100644
--- a/src/meta/server_state.cpp
+++ b/src/meta/server_state.cpp
@@ -133,7 +133,7 @@ void server_state::register_cli_commands()
             }
             return std::string(err.to_string());
         });
-    dassert(_cli_dump_handle != nullptr, "register cli handler failed");
+    CHECK_NOTNULL(_cli_dump_handle, "register cli handler failed");
 
     _ctrl_add_secondary_enable_flow_control = dsn::command_manager::instance().register_command(
         {"meta.lb.add_secondary_enable_flow_control"},
@@ -143,7 +143,7 @@ void server_state::register_cli_commands()
             return remote_command_set_bool_flag(
                 _add_secondary_enable_flow_control, "lb.add_secondary_enable_flow_control", args);
         });
-    dassert(_ctrl_add_secondary_enable_flow_control, "register cli handler failed");
+    CHECK(_ctrl_add_secondary_enable_flow_control, "register cli handler failed");
 
     _ctrl_add_secondary_max_count_for_one_node = dsn::command_manager::instance().register_command(
         {"meta.lb.add_secondary_max_count_for_one_node"},
@@ -168,7 +168,7 @@ void server_state::register_cli_commands()
             }
             return result;
         });
-    dassert(_ctrl_add_secondary_max_count_for_one_node, "register cli handler failed");
+    CHECK(_ctrl_add_secondary_max_count_for_one_node, "register cli handler failed");
 }
 
 void server_state::initialize(meta_service *meta_svc, const std::string &apps_root)
@@ -278,10 +278,10 @@ void server_state::transition_staging_state(std::shared_ptr<app_state> &app)
         resp.info = *app;
         send_response(_meta_svc, app->helpers->pending_response, resp);
     } else {
-        dassert(false,
-                "app(%s) not in staging state(%s)",
-                app->get_logname(),
-                enum_to_string(app->status));
+        CHECK(false,
+              "app({}) not in staging state({})",
+              app->get_logname(),
+              enum_to_string(app->status));
     }
 
     LOG_INFO("app(%s) transfer from %s to %s",
@@ -303,7 +303,7 @@ void server_state::process_one_partition(std::shared_ptr<app_state> &app)
     } else if (ans == 0) {
         transition_staging_state(app);
     } else {
-        dassert(false, "partitions in progress(%d) shouldn't be negetive", ans);
+        CHECK(false, "partitions in progress({}) shouldn't be negetive", ans);
     }
 }
 
@@ -3440,16 +3440,16 @@ void server_state::do_update_max_replica_count(std::shared_ptr<app_state> &app,
                 continue;
             }
 
-            dassert_f(false,
-                      "An error that can't be handled occurs while updating partition-level"
-                      "max_replica_count: error_code={}, app_name={}, app_id={}, "
-                      "partition_index={}, partition_count={}, new_max_replica_count={}",
-                      ec.to_string(),
-                      app_name,
-                      app->app_id,
-                      i,
-                      app->partition_count,
-                      new_max_replica_count);
+            CHECK(false,
+                  "An error that can't be handled occurs while updating partition-level"
+                  "max_replica_count: error_code={}, app_name={}, app_id={}, "
+                  "partition_index={}, partition_count={}, new_max_replica_count={}",
+                  ec.to_string(),
+                  app_name,
+                  app->app_id,
+                  i,
+                  app->partition_count,
+                  new_max_replica_count);
         }
 
         LOG_INFO_F("all partitions have been changed to the new max_replica_count, ready to update "
@@ -3800,14 +3800,14 @@ void server_state::recover_from_max_replica_count_env()
             int32_t max_replica_count = 0;
             if (args.size() < 2 || !dsn::buf2int32(args[1], max_replica_count) ||
                 max_replica_count <= 0) {
-                dassert_f(false,
-                          "invalid max_replica_count_env: app_name={}, app_id={}, "
-                          "max_replica_count={}, {}={}",
-                          app->app_name,
-                          app->app_id,
-                          app->max_replica_count,
-                          replica_envs::UPDATE_MAX_REPLICA_COUNT,
-                          iter->second);
+                CHECK(false,
+                      "invalid max_replica_count_env: app_name={}, app_id={}, "
+                      "max_replica_count={}, {}={}",
+                      app->app_name,
+                      app->app_id,
+                      app->max_replica_count,
+                      replica_envs::UPDATE_MAX_REPLICA_COUNT,
+                      iter->second);
             }
 
             tasks.emplace_back(app, max_replica_count);
diff --git a/src/meta/server_state_restore.cpp b/src/meta/server_state_restore.cpp
index 58fc62205..d48c3f52d 100644
--- a/src/meta/server_state_restore.cpp
+++ b/src/meta/server_state_restore.cpp
@@ -74,7 +74,7 @@ void server_state::sync_app_from_backup_media(
         callback_tsk->enqueue_with(err, dsn::blob());
         return;
     }
-    dassert(file_handle != nullptr, "create file from backup media ecounter error");
+    CHECK_NOTNULL(file_handle, "create file from backup media ecounter error");
     file_handle->read(
         read_request{0, -1}, TASK_CODE_EXEC_INLINED, [callback_tsk](const read_response &resp) {
             callback_tsk->enqueue_with(resp.err, resp.buffer);
@@ -159,7 +159,7 @@ void server_state::restore_app(dsn::message_ex *msg)
                 if (pair.first != ERR_OK) {
                     ec = pair.first;
                 } else {
-                    dassert(pair.second != nullptr, "app info shouldn't be empty");
+                    CHECK_NOTNULL(pair.second, "app info shouldn't be empty");
                     // the same with create_app
                     do_app_create(pair.second);
                     return;
diff --git a/src/meta/test/balancer_simulator/balancer_simulator.cpp b/src/meta/test/balancer_simulator/balancer_simulator.cpp
index 710d9d610..2fa7b1b7b 100644
--- a/src/meta/test/balancer_simulator/balancer_simulator.cpp
+++ b/src/meta/test/balancer_simulator/balancer_simulator.cpp
@@ -37,17 +37,17 @@ using namespace dsn::replication;
 #ifdef ASSERT_EQ
 #undef ASSERT_EQ
 #endif
-#define ASSERT_EQ(left, right) dassert((left) == (right), "")
+#define ASSERT_EQ(left, right) CHECK((left) == (right), "")
 
 #ifdef ASSERT_TRUE
 #undef ASSERT_TRUE
 #endif
-#define ASSERT_TRUE(exp) dassert((exp), "")
+#define ASSERT_TRUE(exp) CHECK((exp), "")
 
 #ifdef ASSERT_FALSE
 #undef ASSERT_FALSE
 #endif
-#define ASSERT_FALSE(exp) dassert(!(exp), "")
+#define ASSERT_FALSE(exp) CHECK(!(exp), "")
 
 class simple_priority_queue
 {
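
In these test tools the gtest-style ASSERT_EQ/ASSERT_TRUE/ASSERT_FALSE names are deliberately redefined on top of CHECK, so the same helpers can run in a standalone simulator binary outside a gtest harness and abort immediately on failure. A trivial usage sketch, assuming the CHECK sketch from the top of the diff:

    // Standalone use of the redefined macros; no gtest runner involved.
    #define ASSERT_EQ(left, right) CHECK((left) == (right), "")
    #define ASSERT_TRUE(exp) CHECK((exp), "")
    #define ASSERT_FALSE(exp) CHECK(!(exp), "")

    int main()
    {
        ASSERT_EQ(2 + 2, 4); // passes
        ASSERT_TRUE(1 < 2);  // passes
        ASSERT_FALSE(2 < 1); // passes; a failing ASSERT_* would abort here
        return 0;
    }
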
diff --git a/src/meta/test/balancer_validator.cpp b/src/meta/test/balancer_validator.cpp
index 10a33d0a1..60f5146c4 100644
--- a/src/meta/test/balancer_validator.cpp
+++ b/src/meta/test/balancer_validator.cpp
@@ -71,18 +71,18 @@ namespace replication {
 #undef ASSERT_EQ
 #endif
 
-#define ASSERT_EQ(left, right) dassert((left) == (right), "")
+#define ASSERT_EQ(left, right) CHECK((left) == (right), "")
 
 #ifdef ASSERT_TRUE
 #undef ASSERT_TRUE
 #endif
 
-#define ASSERT_TRUE(exp) dassert((exp), "")
+#define ASSERT_TRUE(exp) CHECK((exp), "")
 
 #ifdef ASSERT_FALSE
 #undef ASSERT_FALSE
 #endif
-#define ASSERT_FALSE(exp) dassert(!(exp), "")
+#define ASSERT_FALSE(exp) CHECK(!(exp), "")
 
 static void check_cure(app_mapper &apps, node_mapper &nodes, ::dsn::partition_configuration &pc)
 {
diff --git a/src/meta/test/meta_app_operation_test.cpp b/src/meta/test/meta_app_operation_test.cpp
index 53c414dec..b320e46ef 100644
--- a/src/meta/test/meta_app_operation_test.cpp
+++ b/src/meta/test/meta_app_operation_test.cpp
@@ -217,7 +217,7 @@ public:
                                                  int32_t expected_max_replica_count)
     {
         auto app = find_app(app_name);
-        dassert_f(app != nullptr, "app({}) does not exist", app_name);
+        CHECK(app, "app({}) does not exist", app_name);
 
         auto partition_size = static_cast<int>(app->partitions.size());
         for (int i = 0; i < partition_size; ++i) {
@@ -251,7 +251,7 @@ public:
                                       int32_t expected_max_replica_count)
     {
         auto app = find_app(app_name);
-        dassert_f(app != nullptr, "app({}) does not exist", app_name);
+        CHECK(app, "app({}) does not exist", app_name);
 
         // verify local max_replica_count of the app
         ASSERT_EQ(app->max_replica_count, expected_max_replica_count);
diff --git a/src/meta/test/meta_state/meta_state_service.cpp b/src/meta/test/meta_state/meta_state_service.cpp
index 3ca847479..4bea8abc1 100644
--- a/src/meta/test/meta_state/meta_state_service.cpp
+++ b/src/meta/test/meta_state/meta_state_service.cpp
@@ -25,6 +25,7 @@
  */
 
 #include "meta/meta_state_service.h"
+
 #include <boost/lexical_cast.hpp>
 
 #include <gtest/gtest.h>
@@ -33,6 +34,7 @@
 
 #include "meta/meta_state_service_simple.h"
 #include "meta/meta_state_service_zookeeper.h"
+#include "utils/fmt_logging.h"
 
 using namespace dsn;
 using namespace dsn::dist;
@@ -62,9 +64,9 @@ void provider_basic_test(const service_creator_func &service_creator,
         service->get_children("/1",
                               META_STATE_SERVICE_SIMPLE_TEST_CALLBACK,
                               [](error_code ec, const std::vector<std::string> &children) {
-                                  dassert(ec == ERR_OK && children.size() == 1 &&
-                                              *children.begin() == "1",
-                                          "unexpected child");
+                                  CHECK(ec == ERR_OK && children.size() == 1 &&
+                                            *children.begin() == "1",
+                                        "unexpected child");
                               });
         service->node_exist("/1/1", META_STATE_SERVICE_SIMPLE_TEST_CALLBACK, expect_ok)->wait();
         service->delete_node("/1", false, META_STATE_SERVICE_SIMPLE_TEST_CALLBACK, expect_err)
diff --git a/src/meta/test/misc/misc.cpp b/src/meta/test/misc/misc.cpp
index 8d6ee7ee3..147b0432d 100644
--- a/src/meta/test/misc/misc.cpp
+++ b/src/meta/test/misc/misc.cpp
@@ -34,9 +34,9 @@
 
 using namespace dsn::replication;
 
-#define ASSERT_EQ(left, right) dassert((left) == (right), "")
-#define ASSERT_TRUE(exp) dassert((exp), "")
-#define ASSERT_FALSE(exp) dassert(!(exp), "")
+#define ASSERT_EQ(left, right) CHECK((left) == (right), "")
+#define ASSERT_TRUE(exp) CHECK((exp), "")
+#define ASSERT_FALSE(exp) CHECK(!(exp), "")
 
 uint32_t random32(uint32_t min, uint32_t max)
 {
diff --git a/src/nfs/nfs_client_impl.cpp b/src/nfs/nfs_client_impl.cpp
index 7b728f85c..98eebde13 100644
--- a/src/nfs/nfs_client_impl.cpp
+++ b/src/nfs/nfs_client_impl.cpp
@@ -30,8 +30,9 @@
 
 #include <queue>
 
-#include "utils/filesystem.h"
 #include "utils/command_manager.h"
+#include "utils/filesystem.h"
+#include "utils/fmt_logging.h"
 
 namespace dsn {
 namespace service {
@@ -449,9 +450,7 @@ void nfs_client_impl::continue_write()
     std::string file_path =
         dsn::utils::filesystem::path_combine(fc->user_req->file_size_req.dst_dir, fc->file_name);
     std::string path = dsn::utils::filesystem::remove_file_name(file_path.c_str());
-    if (!dsn::utils::filesystem::create_directory(path)) {
-        dassert(false, "create directory %s failed", path.c_str());
-    }
+    CHECK(dsn::utils::filesystem::create_directory(path), "create directory {} failed", path);
 
     if (!fc->file_holder->file_handle) {
         // double check
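
The nfs_client_impl.cpp change above collapses an "if (!ok) { dassert(false, ...); }" guard into a single CHECK whose predicate is the side-effecting call itself. That rewrite is only equivalent when CHECK always evaluates its condition (i.e. it is never compiled out in release builds), which this refactoring evidently assumes. A small sketch with a hypothetical directory helper standing in for dsn::utils::filesystem::create_directory:

    #include <cerrno>
    #include <string>
    #include <sys/stat.h>
    #include <sys/types.h>

    // Hypothetical stand-in: true if the directory exists after the call.
    static bool create_directory_stub(const std::string &path)
    {
        return ::mkdir(path.c_str(), 0755) == 0 || errno == EEXIST;
    }

    void ensure_dir(const std::string &path)
    {
        // The side-effecting call sits inside the predicate, so this is only
        // equivalent to the old guard + dassert(false, ...) form because
        // CHECK unconditionally evaluates its condition.
        CHECK(create_directory_stub(path), "create directory {} failed", path);
    }
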
diff --git a/src/perf_counter/perf_counter_atomic.h b/src/perf_counter/perf_counter_atomic.h
index 586ddd574..6e9284809 100644
--- a/src/perf_counter/perf_counter_atomic.h
+++ b/src/perf_counter/perf_counter_atomic.h
@@ -16,13 +16,16 @@
 // under the License.
 
 #include <atomic>
+
 #include <boost/make_shared.hpp>
-#include "utils/utils.h"
-#include "utils/config_api.h"
-#include "utils/api_utilities.h"
+
 #include "perf_counter/perf_counter.h"
-#include "utils/time_utils.h"
+#include "utils/api_utilities.h"
+#include "utils/config_api.h"
+#include "utils/fmt_logging.h"
 #include "utils/shared_io_service.h"
+#include "utils/time_utils.h"
+#include "utils/utils.h"
 
 namespace dsn {
 
@@ -90,7 +93,7 @@ public:
     }
     virtual double get_percentile(dsn_perf_counter_percentile_type_t type)
     {
-        dassert(false, "invalid execution flow");
+        CHECK(false, "invalid execution flow");
         return 0.0;
     }
 
@@ -165,7 +168,7 @@ public:
         uint64_t task_id = static_cast<int>(utils::get_current_tid());
         _val[task_id % DIVIDE_CONTAINER].fetch_add(val, std::memory_order_relaxed);
     }
-    virtual void set(int64_t val) { dassert(false, "invalid execution flow"); }
+    virtual void set(int64_t val) { CHECK(false, "invalid execution flow"); }
     virtual double get_value()
     {
         uint64_t now = utils::get_current_physical_time_ns();
@@ -185,7 +188,7 @@ public:
     virtual int64_t get_integer_value() { return (int64_t)get_value(); }
     virtual double get_percentile(dsn_perf_counter_percentile_type_t type)
     {
-        dassert(false, "invalid execution flow");
+        CHECK(false, "invalid execution flow");
         return 0.0;
     }
 
@@ -244,9 +247,9 @@ public:
         }
     }
 
-    virtual void increment() { dassert(false, "invalid execution flow"); }
-    virtual void decrement() { dassert(false, "invalid execution flow"); }
-    virtual void add(int64_t val) { dassert(false, "invalid execution flow"); }
+    virtual void increment() { CHECK(false, "invalid execution flow"); }
+    virtual void decrement() { CHECK(false, "invalid execution flow"); }
+    virtual void add(int64_t val) { CHECK(false, "invalid execution flow"); }
     virtual void set(int64_t val)
     {
         uint64_t idx = _tail.fetch_add(1, std::memory_order_relaxed);
@@ -255,7 +258,7 @@ public:
 
     virtual double get_value()
     {
-        dassert(false, "invalid execution flow");
+        CHECK(false, "invalid execution flow");
         return 0.0;
     }
     virtual int64_t get_integer_value() { return (int64_t)get_value(); }
@@ -263,7 +266,7 @@ public:
     virtual double get_percentile(dsn_perf_counter_percentile_type_t type)
     {
         if ((type < 0) || (type >= COUNTER_PERCENTILE_COUNT)) {
-            dassert(false, "send a wrong counter percentile type");
+            CHECK(false, "send a wrong counter percentile type");
             return 0.0;
         }
         return (double)_results[type];
@@ -363,7 +366,7 @@ private:
                 if (ctx->ask[i] == 1) {
                     _results[i] = ctx->tmp[left];
                 } else
-                    dassert(false, "select percentail wrong!!!");
+                    CHECK(false, "select percentail wrong!!!");
             return;
         }
 
@@ -438,7 +441,7 @@ private:
     void on_timer(std::shared_ptr<boost::asio::deadline_timer> timer,
                   const boost::system::error_code &ec)
     {
-        // as the callback is not in tls context, so the log system calls like LOG_INFO, dassert
+        // as the callback is not in tls context, so the log system calls like LOG_INFO, CHECK
         // will cause a lock
         if (!ec) {
             calc(boost::make_shared<compute_context>());
@@ -450,7 +453,7 @@ private:
                                         timer,
                                         std::placeholders::_1));
         } else if (boost::system::errc::operation_canceled != ec) {
-            dassert(false, "on_timer error!!!");
+            CHECK(false, "on_timer error!!!");
         }
     }
 
diff --git a/src/perf_counter/perf_counters.cpp b/src/perf_counter/perf_counters.cpp
index 99ba217bc..1dae74622 100644
--- a/src/perf_counter/perf_counters.cpp
+++ b/src/perf_counter/perf_counters.cpp
@@ -109,7 +109,7 @@ perf_counter_ptr perf_counters::get_app_counter(const char *section,
                                                 bool create_if_not_exist)
 {
     auto cnode = task::get_current_node2();
-    dassert(cnode != nullptr, "cannot get current service node!");
+    CHECK_NOTNULL(cnode, "cannot get current service node!");
     return get_global_counter(cnode->full_name(), section, name, flags, dsptr, create_if_not_exist);
 }
 
@@ -196,7 +196,7 @@ perf_counter *perf_counters::new_counter(const char *app,
     else if (type == dsn_perf_counter_type_t::COUNTER_TYPE_NUMBER_PERCENTILES)
         return new perf_counter_number_percentile_atomic(app, section, name, type, dsptr);
     else {
-        dassert(false, "invalid type(%d)", type);
+        CHECK(false, "invalid type({})", type);
         return nullptr;
     }
 }
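
Several of the converted call sites (greedy_load_balancer.cpp, meta_state_service_simple.cpp, and perf_counters.cpp above) use CHECK(false, ...) to mark a switch default branch that should be unreachable; the trailing return after it exists only to satisfy the compiler's return-path analysis, since CHECK(false, ...) aborts. A minimal sketch of that pattern, again assuming the CHECK sketch from the top of the diff:

    int counter_kind_to_code(int type)
    {
        switch (type) {
        case 0:
            return 10; // e.g. a plain number counter
        case 1:
            return 20; // e.g. a rate counter
        default:
            CHECK(false, "invalid type({})", type);
            return -1; // unreachable in practice; keeps the compiler happy
        }
    }
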
diff --git a/src/redis_protocol/proxy_lib/redis_parser.cpp b/src/redis_protocol/proxy_lib/redis_parser.cpp
index fdff8b70c..ab2f7a2b7 100644
--- a/src/redis_protocol/proxy_lib/redis_parser.cpp
+++ b/src/redis_protocol/proxy_lib/redis_parser.cpp
@@ -933,7 +933,7 @@ void redis_parser::decr_by(message_entry &entry) { counter_internal(entry); }
 
 void redis_parser::counter_internal(message_entry &entry)
 {
-    dassert(!entry.request.sub_requests.empty(), "");
+    CHECK(!entry.request.sub_requests.empty(), "");
     dassert(entry.request.sub_requests[0].length > 0, "");
     const char *command = entry.request.sub_requests[0].data.data();
     int64_t increment = 1;
diff --git a/src/replica/replica_stub.cpp b/src/replica/replica_stub.cpp
index 4345ddd05..6952b4549 100644
--- a/src/replica/replica_stub.cpp
+++ b/src/replica/replica_stub.cpp
@@ -507,22 +507,19 @@ void replica_stub::initialize(const replication_options &opts, bool clear /* = f
 
     // clear dirs if need
     if (clear) {
-        if (!dsn::utils::filesystem::remove_path(_options.slog_dir)) {
-            dassert(false, "Fail to remove %s.", _options.slog_dir.c_str());
-        }
+        CHECK(dsn::utils::filesystem::remove_path(_options.slog_dir),
+              "Fail to remove {}.",
+              _options.slog_dir);
         for (auto &dir : _options.data_dirs) {
-            if (!dsn::utils::filesystem::remove_path(dir)) {
-                dassert(false, "Fail to remove %s.", dir.c_str());
-            }
+            CHECK(dsn::utils::filesystem::remove_path(dir), "Fail to remove {}.", dir);
         }
     }
 
     // init dirs
     std::string cdir;
     std::string err_msg;
-    if (!dsn::utils::filesystem::create_directory(_options.slog_dir, cdir, err_msg)) {
-        dassert_f(false, "{}", err_msg);
-    }
+    CHECK(
+        dsn::utils::filesystem::create_directory(_options.slog_dir, cdir, err_msg), "{}", err_msg);
     _options.slog_dir = cdir;
     initialize_fs_manager(_options.data_dirs, _options.data_dir_tags);
 
@@ -538,9 +535,9 @@ void replica_stub::initialize(const replication_options &opts, bool clear /* = f
     std::vector<std::string> dir_list;
     for (auto &dir : _fs_manager.get_available_data_dirs()) {
         std::vector<std::string> tmp_list;
-        if (!dsn::utils::filesystem::get_subdirectories(dir, tmp_list, false)) {
-            dassert(false, "Fail to get subdirectories in %s.", dir.c_str());
-        }
+        CHECK(dsn::utils::filesystem::get_subdirectories(dir, tmp_list, false),
+              "Fail to get subdirectories in {}.",
+              dir);
         dir_list.insert(dir_list.end(), tmp_list.begin(), tmp_list.end());
     }
 
@@ -773,7 +770,7 @@ void replica_stub::initialize_fs_manager(std::vector<std::string> &data_dirs,
             if (FLAGS_ignore_broken_disk) {
                 LOG_WARNING_F("data dir[{}] is broken, ignore it, error:{}", dir, err_msg);
             } else {
-                dassert_f(false, "{}", err_msg);
+                CHECK(false, "{}", err_msg);
             }
             continue;
         }
@@ -2752,7 +2749,7 @@ replica_stub::get_child_dir(const char *app_type, gpid child_pid, const std::str
             break;
         }
     }
-    dassert_f(!child_dir.empty(), "can not find parent_dir {} in data_dirs", parent_dir);
+    CHECK(!child_dir.empty(), "can not find parent_dir {} in data_dirs", parent_dir);
     return child_dir;
 }
 
@@ -2870,7 +2867,7 @@ replica_ptr replica_stub::create_child_replica_if_not_found(gpid child_pid,
             replica *rep = replica::newr(this, child_pid, *app, false, false, parent_dir);
             if (rep != nullptr) {
                 auto pr = _replicas.insert(replicas::value_type(child_pid, rep));
-                dassert_f(pr.second, "child replica {} has been existed", rep->name());
+                CHECK(pr.second, "child replica {} already exists", rep->name());
                 _counter_replicas_count->increment();
                 _closed_replicas.erase(child_pid);
             }
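
Beyond the rename, the replica_stub.cpp hunks also collapse the negate-then-assert-false shape into a single assertion on the positive condition. A minimal sketch of the before/after pattern, using plain assert() as a stand-in for CHECK and a made-up remove_path_stub() helper rather than the real dsn::utils::filesystem API:

    // Sketch of the refactoring pattern only; assert() stands in for CHECK and
    // remove_path_stub() is a hypothetical helper, not the real filesystem call.
    #include <cassert>
    #include <string>

    static bool remove_path_stub(const std::string &) { return true; }

    int main()
    {
        const std::string slog_dir = "/tmp/slog";

        // Before: test the failure case, then assert on a constant false.
        // if (!remove_path_stub(slog_dir)) {
        //     dassert(false, "Fail to remove %s.", slog_dir.c_str());
        // }

        // After: one assertion on the positive condition.
        assert(remove_path_stub(slog_dir) && "Fail to remove slog_dir");
        return 0;
    }
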
diff --git a/src/runtime/security/kinit_context.cpp b/src/runtime/security/kinit_context.cpp
index 24c0dab21..39951638c 100644
--- a/src/runtime/security/kinit_context.cpp
+++ b/src/runtime/security/kinit_context.cpp
@@ -54,9 +54,9 @@ DSN_DEFINE_string("security", service_name, "", "service name");
 // will not pass.
 error_s check_configuration()
 {
-    dassert(FLAGS_enable_auth || FLAGS_enable_zookeeper_kerberos,
-            "There is no need to check configuration if FLAGS_enable_auth"
-            " and FLAGS_enable_zookeeper_kerberos both are not true");
+    CHECK(FLAGS_enable_auth || FLAGS_enable_zookeeper_kerberos,
+          "There is no need to check configuration if FLAGS_enable_auth"
+          " and FLAGS_enable_zookeeper_kerberos both are not true");
 
     if (0 == strlen(FLAGS_krb5_keytab) || !utils::filesystem::file_exists(FLAGS_krb5_keytab)) {
         return error_s::make(ERR_INVALID_PARAMETERS,
diff --git a/src/runtime/security/negotiation_manager.cpp b/src/runtime/security/negotiation_manager.cpp
index fcf1817c5..47740ce8e 100644
--- a/src/runtime/security/negotiation_manager.cpp
+++ b/src/runtime/security/negotiation_manager.cpp
@@ -56,8 +56,8 @@ void negotiation_manager::open_service()
 
 void negotiation_manager::on_negotiation_request(negotiation_rpc rpc)
 {
-    dassert(!rpc.dsn_request()->io_session->is_client(),
-            "only server session receives negotiation request");
+    CHECK(!rpc.dsn_request()->io_session->is_client(),
+          "only server session receives negotiation request");
 
     // reply SASL_AUTH_DISABLE if auth is not enable
     if (!security::FLAGS_enable_auth) {
@@ -74,8 +74,8 @@ void negotiation_manager::on_negotiation_request(negotiation_rpc rpc)
 
 void negotiation_manager::on_negotiation_response(error_code err, negotiation_rpc rpc)
 {
-    dassert(rpc.dsn_request()->io_session->is_client(),
-            "only client session receives negotiation response");
+    CHECK(rpc.dsn_request()->io_session->is_client(),
+          "only client session receives negotiation response");
 
     std::shared_ptr<negotiation> nego = get_negotiation(rpc);
     if (nullptr != nego) {
diff --git a/src/server/pegasus_server_impl.cpp b/src/server/pegasus_server_impl.cpp
index 899bcb2aa..84581cc66 100644
--- a/src/server/pegasus_server_impl.cpp
+++ b/src/server/pegasus_server_impl.cpp
@@ -1769,8 +1769,8 @@ void pegasus_server_impl::cancel_background_work(bool wait)
 ::dsn::error_code pegasus_server_impl::stop(bool clear_state)
 {
     if (!_is_open) {
-        dassert(_db == nullptr, "");
-        dassert(!clear_state, "should not be here if do clear");
+        CHECK(_db == nullptr, "");
+        CHECK(!clear_state, "should not be here if do clear");
         return ::dsn::ERR_OK;
     }
 
@@ -2242,7 +2242,7 @@ pegasus_server_impl::storage_apply_checkpoint(chkpt_apply_mode mode,
         return err;
     }
 
-    dassert(_is_open, "");
+    CHECK(_is_open, "");
     dassert(ci == last_durable_decree(), "%" PRId64 " VS %" PRId64 "", ci, last_durable_decree());
 
     LOG_INFO("%s: apply checkpoint succeed, last_durable_decree = %" PRId64,
@@ -2276,7 +2276,7 @@ bool pegasus_server_impl::validate_filter(::dsn::apps::filter_type::type filter_
         }
     }
     default:
-        dassert(false, "unsupported filter type: %d", filter_type);
+        CHECK(false, "unsupported filter type: {}", filter_type);
     }
     return false;
 }
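
The validate_filter() change keeps the common "default: CHECK(false, ...)" idiom: every known enum value is handled explicitly, and an unexpected value aborts with a message. A self-contained sketch of that idiom with an invented enum, not the real dsn::apps::filter_type:

    // Sketch of the "default branch aborts" idiom; filter_kind is invented
    // for illustration and std::abort() stands in for a failed CHECK.
    #include <cstdio>
    #include <cstdlib>

    enum class filter_kind { no_filter, prefix, postfix };

    static bool validate(filter_kind k)
    {
        switch (k) {
        case filter_kind::no_filter:
        case filter_kind::prefix:
        case filter_kind::postfix:
            return true;
        default:
            std::fprintf(stderr, "unsupported filter type: %d\n", static_cast<int>(k));
            std::abort();
        }
    }

    int main() { return validate(filter_kind::prefix) ? 0 : 1; }
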
diff --git a/src/shell/command_helper.h b/src/shell/command_helper.h
index fb5bcef6f..c1185d244 100644
--- a/src/shell/command_helper.h
+++ b/src/shell/command_helper.h
@@ -1216,7 +1216,7 @@ get_app_stat(shell_context *sc, const std::string &app_name, std::vector<row_dat
                 std::string counter_name;
                 bool parse_ret = parse_app_pegasus_perf_counter_name(
                     m.name, app_id_x, partition_index_x, counter_name);
-                dassert(parse_ret, "name = %s", m.name.c_str());
+                CHECK(parse_ret, "name = {}", m.name);
                 dassert(app_id_x == app_id, "name = %s", m.name.c_str());
                 dassert(partition_index_x < partition_count, "name = %s", m.name.c_str());
                 if (partitions[partition_index_x].primary != node_addr)
@@ -1279,7 +1279,7 @@ inline bool get_capacity_unit_stat(shell_context *sc,
             int32_t app_id, pidx;
             std::string counter_name;
             bool r = parse_app_pegasus_perf_counter_name(m.name, app_id, pidx, counter_name);
-            dassert(r, "name = %s", m.name.c_str());
+            CHECK(r, "name = {}", m.name);
             if (counter_name == "recent.read.cu") {
                 nodes_stat[i].cu_value_by_app[app_id].first += (int64_t)m.value;
             } else if (counter_name == "recent.write.cu") {
@@ -1346,7 +1346,7 @@ inline bool get_storage_size_stat(shell_context *sc, app_storage_size_stat &st_s
             std::string counter_name;
             bool parse_ret = parse_app_pegasus_perf_counter_name(
                 m.name, app_id_x, partition_index_x, counter_name);
-            dassert(parse_ret, "name = %s", m.name.c_str());
+            CHECK(parse_ret, "name = {}", m.name);
             if (counter_name != "disk.storage.sst(MB)")
                 continue;
             auto find = app_partitions.find(app_id_x);
diff --git a/src/utils/casts.h b/src/utils/casts.h
index 98f6ccfa7..ce20a7770 100644
--- a/src/utils/casts.h
+++ b/src/utils/casts.h
@@ -39,7 +39,7 @@ inline To down_cast(From *from)
     // Use RTTI to do double-check, though in practice the unit tests are seldom built in debug
     // mode. For example, the unit tests of github CI for both rDSN and Pegasus are built in
     // release mode.
-    dassert_f(from == NULL || dynamic_cast<To>(from) != NULL, "");
+    CHECK(from == NULL || dynamic_cast<To>(from) != NULL, "");
 
     return static_cast<To>(from);
 }
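
As the comment above notes, down_cast pairs a cheap static_cast with an RTTI double-check. A hedged sketch of the same idea, with illustrative Base/Derived types and assert() standing in for CHECK:

    // Sketch of a checked downcast in the spirit of down_cast; Base/Derived
    // are illustrative types and assert() replaces the CHECK macro.
    #include <cassert>

    struct Base { virtual ~Base() = default; };
    struct Derived : Base {};

    template <typename To, typename From>
    To down_cast_sketch(From *from)
    {
        // RTTI double-check: either null, or really points to a To object.
        assert(from == nullptr || dynamic_cast<To>(from) != nullptr);
        return static_cast<To>(from);
    }

    int main()
    {
        Derived d;
        Base *b = &d;
        Derived *dp = down_cast_sketch<Derived *>(b); // checked downcast
        return dp == &d ? 0 : 1;
    }
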
diff --git a/src/utils/fmt_logging.h b/src/utils/fmt_logging.h
index 33d843bd6..812673bc7 100644
--- a/src/utils/fmt_logging.h
+++ b/src/utils/fmt_logging.h
@@ -55,15 +55,15 @@
 #define LOG_WARNING_PREFIX(...) LOG_WARNING_F("[{}] {}", log_prefix(), fmt::format(__VA_ARGS__))
 #define LOG_ERROR_PREFIX(...) LOG_ERROR_F("[{}] {}", log_prefix(), fmt::format(__VA_ARGS__))
 #define LOG_FATAL_PREFIX(...) LOG_FATAL_F("[{}] {}", log_prefix(), fmt::format(__VA_ARGS__))
-#define dassert_replica(x, ...) dassert_f(x, "[{}] {}", log_prefix(), fmt::format(__VA_ARGS__))
+#define dassert_replica(x, ...) CHECK(x, "[{}] {}", log_prefix(), fmt::format(__VA_ARGS__))
 
 // Macros to check expected condition. It will abort the application
 // and log a fatal message when the condition is not met.
-#define dcheck_eq(var1, var2) dassert_f(var1 == var2, "{} vs {}", var1, var2)
-#define dcheck_ge(var1, var2) dassert_f(var1 >= var2, "{} vs {}", var1, var2)
-#define dcheck_le(var1, var2) dassert_f(var1 <= var2, "{} vs {}", var1, var2)
-#define dcheck_gt(var1, var2) dassert_f(var1 > var2, "{} vs {}", var1, var2)
-#define dcheck_lt(var1, var2) dassert_f(var1 < var2, "{} vs {}", var1, var2)
+#define dcheck_eq(var1, var2) CHECK(var1 == var2, "{} vs {}", var1, var2)
+#define dcheck_ge(var1, var2) CHECK(var1 >= var2, "{} vs {}", var1, var2)
+#define dcheck_le(var1, var2) CHECK(var1 <= var2, "{} vs {}", var1, var2)
+#define dcheck_gt(var1, var2) CHECK(var1 > var2, "{} vs {}", var1, var2)
+#define dcheck_lt(var1, var2) CHECK(var1 < var2, "{} vs {}", var1, var2)
 
 #define dcheck_eq_replica(var1, var2) dassert_replica(var1 == var2, "{} vs {}", var1, var2)
 #define dcheck_ge_replica(var1, var2) dassert_replica(var1 >= var2, "{} vs {}", var1, var2)
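
The dcheck_* comparison macros above all reduce to a CHECK on the comparison plus a "{} vs {}" message that reports both operands. A self-contained sketch of that layering, assuming the fmt library; the SKETCH_* names are placeholders, not the project's macros:

    // Sketch of comparison checks layered on a CHECK-style macro; the real
    // definitions are the dcheck_* macros shown in the hunk above.
    #include <cstdio>
    #include <cstdlib>
    #include <fmt/format.h>

    #define SKETCH_CHECK(cond, ...)                                       \
        do {                                                              \
            if (!(cond)) {                                                \
                fmt::print(stderr, "check failed: {}\n",                  \
                           fmt::format(__VA_ARGS__));                     \
                std::abort();                                             \
            }                                                             \
        } while (0)

    #define SKETCH_CHECK_EQ(a, b) SKETCH_CHECK((a) == (b), "{} vs {}", (a), (b))
    #define SKETCH_CHECK_LT(a, b) SKETCH_CHECK((a) < (b), "{} vs {}", (a), (b))

    int main()
    {
        SKETCH_CHECK_EQ(2 + 2, 4); // passes
        SKETCH_CHECK_LT(1, 2);     // passes
        return 0;
    }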

