Posted to commits@pegasus.apache.org by la...@apache.org on 2022/11/05 15:04:56 UTC
[incubator-pegasus] branch master updated: refactor(macro): use CHECK_* to replace dassert_* (part 2) (#1217)
This is an automated email from the ASF dual-hosted git repository.
laiyingchun pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-pegasus.git
The following commit(s) were added to refs/heads/master by this push:
new 0f4bd1501 refactor(macro): use CHECK_* to replace dassert_* (part 2) (#1217)
0f4bd1501 is described below
commit 0f4bd150142418b3b8bd094926f39fb657d287f2
Author: Yingchun Lai <la...@apache.org>
AuthorDate: Sat Nov 5 23:04:51 2022 +0800
refactor(macro): use CHECK_* to replace dassert_* (part 2) (#1217)
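
    The pattern applied throughout is mechanical: printf-style dassert/dassert_f
    assertions become fmt-style CHECK_* macros from src/utils/fmt_logging.h, which
    report the compared operands on failure and accept an optional fmt-format
    message. A minimal before/after sketch, drawn from the pegasus_value_schema.h
    hunks further down:

        // Before: generic assertion; operands and message are formatted by hand.
        dassert_f(version <= PEGASUS_DATA_VERSION_MAX,
                  "data version({}) must be <= {}",
                  version,
                  PEGASUS_DATA_VERSION_MAX);

        // After: dedicated comparison macro; the operands are reported
        // automatically, so the hand-written message can often be dropped.
        CHECK_LE(version, PEGASUS_DATA_VERSION_MAX);

        // Where extra context still helps, the *_MSG variants take an fmt message.
        CHECK_GE_MSG(value.length(), sizeof(uint32_t), "value must include 'expire_ts' header");

    The same conversion turns printf placeholders (%s, %d) in the remaining
    messages into fmt-style {} placeholders, as the fs_manager.cpp and
    meta_backup_service.cpp hunks show.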
---
src/aio/aio_task.cpp | 7 +-
src/aio/disk_engine.cpp | 4 +-
src/base/pegasus_key_schema.h | 19 +-
src/base/pegasus_value_schema.h | 13 +-
src/base/value_schema_v0.cpp | 2 +-
src/base/value_schema_v1.cpp | 2 +-
src/base/value_schema_v2.cpp | 5 +-
src/block_service/directio_writable_file.cpp | 6 +-
src/client_lib/pegasus_client_impl.cpp | 2 +-
src/common/duplication_common.cpp | 18 +-
src/common/fs_manager.cpp | 10 +-
src/common/replication_common.cpp | 16 +-
src/common/serialization_helper/thrift_helper.h | 8 +-
src/common/storage_serverlet.h | 8 +-
.../failure_detector_multimaster.cpp | 5 +-
src/geo/lib/geo_client.cpp | 5 +-
src/http/http_message_parser.cpp | 2 +-
src/meta/greedy_load_balancer.cpp | 4 +-
src/meta/load_balance_policy.cpp | 18 +-
src/meta/meta_backup_service.cpp | 67 ++-
src/meta/meta_data.cpp | 36 +-
src/meta/meta_http_service.cpp | 20 +-
src/meta/meta_server_failure_detector.cpp | 7 +-
src/meta/meta_split_service.cpp | 23 +-
src/meta/meta_state_service_simple.cpp | 10 +-
src/meta/server_load_balancer.cpp | 12 +-
src/meta/server_state.cpp | 556 ++++++++++-----------
src/meta/server_state_restore.cpp | 7 +-
src/meta/test/meta_app_operation_test.cpp | 10 +-
src/meta/test/meta_state/meta_state_service.cpp | 4 +-
src/meta/test/meta_test_base.cpp | 9 +-
src/nfs/nfs_client_impl.cpp | 3 +-
src/nfs/nfs_client_impl.h | 2 +-
src/nfs/nfs_server_impl.h | 2 +-
src/perf_counter/perf_counter_atomic.h | 2 +-
src/perf_counter/perf_counters.cpp | 9 +-
src/redis_protocol/proxy_lib/proxy_layer.cpp | 4 +-
src/redis_protocol/proxy_lib/redis_parser.cpp | 6 +-
src/replica/backup/cold_backup_context.cpp | 4 +-
src/replica/backup/cold_backup_context.h | 12 +-
src/replica/duplication/replica_duplicator.cpp | 15 +-
src/replica/log_file.cpp | 6 +-
src/replica/log_file_stream.h | 2 +-
src/replica/mutation.cpp | 2 +-
src/replica/mutation_cache.cpp | 5 +-
src/replica/mutation_log.cpp | 284 +++++------
src/replica/mutation_log_replay.cpp | 5 +-
src/replica/prepare_list.cpp | 2 +-
src/replica/replica.cpp | 25 +-
src/replica/replica_2pc.cpp | 71 +--
src/replica/replica_backup.cpp | 10 +-
src/replica/replica_check.cpp | 4 +-
src/replica/replica_config.cpp | 206 +++-----
src/replica/replica_failover.cpp | 4 +-
src/replica/replica_init.cpp | 9 +-
src/replica/replica_learn.cpp | 76 +--
src/replica/replica_restore.cpp | 13 +-
src/replica/replica_stub.cpp | 137 +++--
.../storage/simple_kv/simple_kv.server.impl.cpp | 21 +-
src/replica/storage/simple_kv/test/case.cpp | 16 +-
src/replica/storage/simple_kv/test/checker.cpp | 2 +-
src/replica/storage/simple_kv/test/common.cpp | 5 +-
src/replica/storage/simple_kv/test/common.h | 4 +
.../simple_kv/test/simple_kv.server.impl.cpp | 7 +-
src/runtime/global_config.cpp | 2 +-
src/runtime/profiler.cpp | 41 +-
src/runtime/rpc/asio_net_provider.cpp | 18 +-
src/runtime/rpc/dsn_message_parser.cpp | 7 +-
src/runtime/rpc/group_address.h | 4 +-
src/runtime/rpc/message_parser.cpp | 11 +-
src/runtime/rpc/network.cpp | 15 +-
src/runtime/rpc/network.sim.cpp | 6 +-
src/runtime/rpc/rpc_engine.cpp | 20 +-
src/runtime/rpc/rpc_holder.h | 4 +-
src/runtime/rpc/rpc_message.cpp | 9 +-
src/runtime/rpc/thrift_message_parser.cpp | 10 +-
src/runtime/service_api_c.cpp | 6 +-
src/runtime/service_engine.cpp | 2 +-
src/runtime/task/task.cpp | 13 +-
src/runtime/task/task.h | 6 +-
src/runtime/task/task_engine.cpp | 22 +-
src/runtime/task/task_engine.sim.cpp | 20 +-
src/runtime/task/task_spec.cpp | 17 +-
src/runtime/task/task_tracker.h | 20 +-
src/runtime/task/task_worker.cpp | 15 +-
src/runtime/tool_api.cpp | 2 +-
src/server/available_detector.cpp | 6 +-
src/server/brief_stat.cpp | 10 +-
src/server/hashkey_transform.h | 4 +-
src/server/hotspot_partition_calculator.cpp | 10 +-
src/server/pegasus_server_impl.cpp | 17 +-
src/server/pegasus_server_impl_init.cpp | 2 +-
src/server/pegasus_server_write.cpp | 8 +-
src/server/pegasus_write_service.cpp | 4 +-
src/server/pegasus_write_service_impl.h | 4 +-
src/shell/command_helper.h | 20 +-
src/shell/commands/data_operations.cpp | 2 +-
src/test/kill_test/data_verifier.cpp | 3 +-
src/test/kill_test/killer_handler_shell.cpp | 2 +-
src/test/kill_test/process_kill_testor.cpp | 2 +-
src/test/pressure_test/main.cpp | 2 +-
src/utils/alloc.cpp | 2 +-
src/utils/alloc.h | 21 +-
src/utils/command_manager.cpp | 7 +-
src/utils/filesystem.cpp | 13 +-
src/utils/fmt_logging.h | 95 +++-
src/utils/long_adder.cpp | 2 +-
src/utils/math.cpp | 9 +-
src/utils/metrics.cpp | 9 +-
src/utils/metrics.h | 9 +-
src/utils/nth_element.h | 2 +-
src/utils/output_utils.cpp | 8 +-
src/utils/rpc_address.h | 5 +
src/utils/test/metrics_test.cpp | 2 +-
src/utils/thread_access_checker.cpp | 9 +-
src/zookeeper/lock_struct.cpp | 24 +-
src/zookeeper/test/distributed_lock_zookeeper.cpp | 2 +-
src/zookeeper/zookeeper_session.cpp | 2 +-
118 files changed, 1121 insertions(+), 1356 deletions(-)
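
    For orientation, a hypothetical, self-contained sketch of the mechanics behind
    such a comparison macro. This is not the real implementation: the actual
    definitions live in src/utils/fmt_logging.h (also touched by this commit) and
    carry file/line information, the full CHECK_*/CHECK_*_MSG family, and logging
    integration.

        #include <cstdio>
        #include <cstdlib>
        #include <fmt/core.h>

        // Hypothetical sketch only; see src/utils/fmt_logging.h for the real macros.
        #define SKETCH_CHECK_EQ_MSG(a, b, ...)                                   \
            do {                                                                 \
                if (!((a) == (b))) {                                             \
                    /* Report both operands plus an fmt-formatted user message. */\
                    fmt::print(stderr,                                           \
                               "assertion failed: {} vs {}: {}\n",               \
                               (a),                                              \
                               (b),                                              \
                               fmt::format(__VA_ARGS__));                        \
                    std::abort();                                                \
                }                                                                \
            } while (false)

        int main()
        {
            unsigned written = 0;
            // Passes, so the program exits normally; on mismatch it would print
            // both values and the message, then abort.
            SKETCH_CHECK_EQ_MSG(written, 0u, "written {} bytes, expected none", written);
            return 0;
        }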
diff --git a/src/aio/aio_task.cpp b/src/aio/aio_task.cpp
index 019074e09..16e5308ab 100644
--- a/src/aio/aio_task.cpp
+++ b/src/aio/aio_task.cpp
@@ -32,9 +32,10 @@ aio_task::aio_task(dsn::task_code code, aio_handler &&cb, int hash, service_node
{
_is_null = (_cb == nullptr);
- dassert_f(TASK_TYPE_AIO == spec().type,
- "{} is not of AIO type, please use DEFINE_TASK_CODE_AIO to define the task code",
- spec().name);
+ CHECK_EQ_MSG(TASK_TYPE_AIO,
+ spec().type,
+ "{} is not of AIO type, please use DEFINE_TASK_CODE_AIO to define the task code",
+ spec().name);
set_error_code(ERR_IO_PENDING);
_aio_ctx = file::prepare_aio_context(this);
diff --git a/src/aio/disk_engine.cpp b/src/aio/disk_engine.cpp
index 9a7e602ee..9fef5d58d 100644
--- a/src/aio/disk_engine.cpp
+++ b/src/aio/disk_engine.cpp
@@ -107,7 +107,7 @@ aio_task *disk_file::write(aio_task *tsk, void *ctx)
aio_task *disk_file::on_read_completed(aio_task *wk, error_code err, size_t size)
{
- dassert(wk->next == nullptr, "");
+ CHECK(wk->next == nullptr, "");
auto ret = _read_queue.on_work_completed(wk, nullptr);
wk->enqueue(err, size);
wk->release_ref(); // added in above read
@@ -138,7 +138,7 @@ aio_task *disk_file::on_write_completed(aio_task *wk, void *ctx, error_code err,
}
if (err == ERR_OK) {
- dassert(size == 0, "written buffer size does not equal to input buffer's size");
+ CHECK_EQ_MSG(size, 0, "written buffer size does not equal to input buffer's size");
}
return ret;
diff --git a/src/base/pegasus_key_schema.h b/src/base/pegasus_key_schema.h
index c6f1f6e32..4790693ac 100644
--- a/src/base/pegasus_key_schema.h
+++ b/src/base/pegasus_key_schema.h
@@ -40,7 +40,7 @@ namespace pegasus {
template <typename T>
void pegasus_generate_key(::dsn::blob &key, const T &hash_key, const T &sort_key)
{
- dassert(hash_key.length() < UINT16_MAX, "hash key length must be less than UINT16_MAX");
+ CHECK_LT(hash_key.length(), UINT16_MAX);
int len = 2 + hash_key.length() + sort_key.length();
std::shared_ptr<char> buf(::dsn::utils::make_shared_array<char>(len));
@@ -64,7 +64,7 @@ void pegasus_generate_key(::dsn::blob &key, const T &hash_key, const T &sort_key
template <typename T>
void pegasus_generate_next_blob(::dsn::blob &next, const T &hash_key)
{
- dassert(hash_key.length() < UINT16_MAX, "hash key length must be less than UINT16_MAX");
+ CHECK_LT(hash_key.length(), UINT16_MAX);
int hash_key_len = hash_key.length();
std::shared_ptr<char> buf(::dsn::utils::make_shared_array<char>(hash_key_len + 2));
@@ -102,14 +102,13 @@ void pegasus_generate_next_blob(::dsn::blob &next, const T &hash_key, const T &s
inline void
pegasus_restore_key(const ::dsn::blob &key, ::dsn::blob &hash_key, ::dsn::blob &sort_key)
{
- dassert(key.length() >= 2, "key length must be no less than 2");
+ CHECK_GE(key.length(), 2);
// hash_key_len is in big endian
uint16_t hash_key_len = ::dsn::endian::ntoh(*(uint16_t *)(key.data()));
if (hash_key_len > 0) {
- dassert(key.length() >= 2 + hash_key_len,
- "key length must be no less than (2 + hash_key_len)");
+ CHECK_GE(key.length(), 2 + hash_key_len);
hash_key = key.range(2, hash_key_len);
} else {
hash_key = ::dsn::blob();
@@ -127,14 +126,13 @@ pegasus_restore_key(const ::dsn::blob &key, ::dsn::blob &hash_key, ::dsn::blob &
inline void
pegasus_restore_key(const ::dsn::blob &key, std::string &hash_key, std::string &sort_key)
{
- dassert(key.length() >= 2, "key length must be no less than 2");
+ CHECK_GE(key.length(), 2);
// hash_key_len is in big endian
uint16_t hash_key_len = ::dsn::endian::ntoh(*(uint16_t *)(key.data()));
if (hash_key_len > 0) {
- dassert(key.length() >= 2 + hash_key_len,
- "key length must be no less than (2 + hash_key_len)");
+ CHECK_GE(key.length(), 2 + hash_key_len);
hash_key.assign(key.data() + 2, hash_key_len);
} else {
hash_key.clear();
@@ -151,15 +149,14 @@ pegasus_restore_key(const ::dsn::blob &key, std::string &hash_key, std::string &
template <typename T>
inline uint64_t pegasus_key_hash(const T &key)
{
- dassert(key.size() >= 2, "key length must be no less than 2");
+ CHECK_GE(key.size(), 2);
// hash_key_len is in big endian
uint16_t hash_key_len = ::dsn::endian::ntoh(*(uint16_t *)(key.data()));
if (hash_key_len > 0) {
// hash_key_len > 0, compute hash from hash_key
- dassert(key.size() >= 2 + hash_key_len,
- "key length must be no less than (2 + hash_key_len)");
+ CHECK_GE(key.size(), 2 + hash_key_len);
return dsn::utils::crc64_calc(key.data() + 2, hash_key_len, 0);
} else {
// hash_key_len == 0, compute hash from sort_key
diff --git a/src/base/pegasus_value_schema.h b/src/base/pegasus_value_schema.h
index 76313d26e..6e48cb494 100644
--- a/src/base/pegasus_value_schema.h
+++ b/src/base/pegasus_value_schema.h
@@ -61,11 +61,7 @@ inline uint64_t extract_timestamp_from_timetag(uint64_t timetag)
/// \return expire_ts in host endian
inline uint32_t pegasus_extract_expire_ts(uint32_t version, dsn::string_view value)
{
- dassert_f(version <= PEGASUS_DATA_VERSION_MAX,
- "data version({}) must be <= {}",
- version,
- PEGASUS_DATA_VERSION_MAX);
-
+ CHECK_LE(version, PEGASUS_DATA_VERSION_MAX);
return dsn::data_input(value).read_u32();
}
@@ -76,10 +72,7 @@ inline uint32_t pegasus_extract_expire_ts(uint32_t version, dsn::string_view val
inline void
pegasus_extract_user_data(uint32_t version, std::string &&raw_value, ::dsn::blob &user_data)
{
- dassert_f(version <= PEGASUS_DATA_VERSION_MAX,
- "data version({}) must be <= {}",
- version,
- PEGASUS_DATA_VERSION_MAX);
+ CHECK_LE(version, PEGASUS_DATA_VERSION_MAX);
auto *s = new std::string(std::move(raw_value));
dsn::data_input input(*s);
@@ -110,7 +103,7 @@ inline uint64_t pegasus_extract_timetag(int version, dsn::string_view value)
inline void pegasus_update_expire_ts(uint32_t version, std::string &value, uint32_t new_expire_ts)
{
if (version == 0 || version == 1) {
- dassert_f(value.length() >= sizeof(uint32_t), "value must include 'expire_ts' header");
+ CHECK_GE_MSG(value.length(), sizeof(uint32_t), "value must include 'expire_ts' header");
new_expire_ts = dsn::endian::hton(new_expire_ts);
memcpy(const_cast<char *>(value.data()), &new_expire_ts, sizeof(uint32_t));
diff --git a/src/base/value_schema_v0.cpp b/src/base/value_schema_v0.cpp
index a86f8f3ba..ecea91f5a 100644
--- a/src/base/value_schema_v0.cpp
+++ b/src/base/value_schema_v0.cpp
@@ -86,7 +86,7 @@ std::unique_ptr<value_field> value_schema_v0::extract_timestamp(dsn::string_view
void value_schema_v0::update_expire_ts(std::string &value, std::unique_ptr<value_field> field)
{
- dassert_f(value.length() >= sizeof(uint32_t), "value must include 'expire_ts' header");
+ CHECK_GE_MSG(value.length(), sizeof(uint32_t), "value must include 'expire_ts' header");
auto expire_field = static_cast<expire_timestamp_field *>(field.get());
auto new_expire_ts = dsn::endian::hton(expire_field->expire_ts);
diff --git a/src/base/value_schema_v1.cpp b/src/base/value_schema_v1.cpp
index 7d28f18da..1d5024366 100644
--- a/src/base/value_schema_v1.cpp
+++ b/src/base/value_schema_v1.cpp
@@ -103,7 +103,7 @@ std::unique_ptr<value_field> value_schema_v1::extract_time_tag(dsn::string_view
void value_schema_v1::update_expire_ts(std::string &value, std::unique_ptr<value_field> field)
{
- dassert_f(value.length() >= sizeof(uint32_t), "value must include 'expire_ts' header");
+ CHECK_GE_MSG(value.length(), sizeof(uint32_t), "value must include 'expire_ts' header");
auto expire_field = static_cast<expire_timestamp_field *>(field.get());
auto new_expire_ts = dsn::endian::hton(expire_field->expire_ts);
diff --git a/src/base/value_schema_v2.cpp b/src/base/value_schema_v2.cpp
index 46fb2ca54..9b37a789b 100644
--- a/src/base/value_schema_v2.cpp
+++ b/src/base/value_schema_v2.cpp
@@ -108,8 +108,9 @@ std::unique_ptr<value_field> value_schema_v2::extract_time_tag(dsn::string_view
void value_schema_v2::update_expire_ts(std::string &value, std::unique_ptr<value_field> field)
{
- dassert_f(value.length() >= sizeof(uint32_t) + sizeof(uint8_t),
- "value must include 'expire_ts' header");
+ CHECK_GE_MSG(value.length(),
+ sizeof(uint32_t) + sizeof(uint8_t),
+ "value must include 'expire_ts' header");
auto expire_field = static_cast<expire_timestamp_field *>(field.get());
auto new_expire_ts = dsn::endian::hton(expire_field->expire_ts);
diff --git a/src/block_service/directio_writable_file.cpp b/src/block_service/directio_writable_file.cpp
index ca7b179f4..bf42c2d07 100644
--- a/src/block_service/directio_writable_file.cpp
+++ b/src/block_service/directio_writable_file.cpp
@@ -63,7 +63,7 @@ direct_io_writable_file::~direct_io_writable_file()
return;
}
// This is a safeguard: users should call finalize() manually
- dassert(_offset == 0, "finalize() should be called before destructor");
+ CHECK_EQ_MSG(_offset, 0, "finalize() should be called before destructor");
free(_buffer);
close(_fd);
@@ -92,7 +92,7 @@ bool direct_io_writable_file::initialize()
bool direct_io_writable_file::finalize()
{
- dassert(_buffer && _fd >= 0, "Initialize the instance first");
+ CHECK(_buffer && _fd >= 0, "Initialize the instance first");
if (_offset > 0) {
if (::write(_fd, _buffer, _buffer_size) != _buffer_size) {
@@ -108,7 +108,7 @@ bool direct_io_writable_file::finalize()
bool direct_io_writable_file::write(const char *s, size_t n)
{
- dassert(_buffer && _fd >= 0, "Initialize the instance first");
+ CHECK(_buffer && _fd >= 0, "Initialize the instance first");
uint32_t remaining = n;
while (remaining > 0) {
diff --git a/src/client_lib/pegasus_client_impl.cpp b/src/client_lib/pegasus_client_impl.cpp
index 19e92d642..089bc645d 100644
--- a/src/client_lib/pegasus_client_impl.cpp
+++ b/src/client_lib/pegasus_client_impl.cpp
@@ -50,7 +50,7 @@ pegasus_client_impl::pegasus_client_impl(const char *cluster_name, const char *a
std::vector<dsn::rpc_address> meta_servers;
dsn::replication::replica_helper::load_meta_servers(
meta_servers, PEGASUS_CLUSTER_SECTION_NAME.c_str(), cluster_name);
- dassert(meta_servers.size() > 0, "");
+ CHECK_GT(meta_servers.size(), 0);
_meta_server.assign_group("meta-servers");
_meta_server.group_address()->add_list(meta_servers);
diff --git a/src/common/duplication_common.cpp b/src/common/duplication_common.cpp
index 759cb637a..78a11236d 100644
--- a/src/common/duplication_common.cpp
+++ b/src/common/duplication_common.cpp
@@ -96,20 +96,22 @@ private:
for (std::string &cluster : clusters) {
int64_t cluster_id =
dsn_config_get_value_int64("duplication-group", cluster.data(), 0, "");
- dassert(cluster_id < 128 && cluster_id > 0,
- "cluster_id(%zd) for %s should be in [1, 127]",
- cluster_id,
- cluster.data());
+ CHECK(cluster_id < 128 && cluster_id > 0,
+ "cluster_id({}) for {} should be in [1, 127]",
+ cluster_id,
+ cluster);
_group.emplace(cluster, static_cast<uint8_t>(cluster_id));
}
- dassert_f(clusters.size() == _group.size(),
- "there might be duplicate cluster_name in configuration");
+ CHECK_EQ_MSG(clusters.size(),
+ _group.size(),
+ "there might be duplicate cluster_name in configuration");
for (const auto &kv : _group) {
_distinct_cids.insert(kv.second);
}
- dassert_f(_distinct_cids.size() == _group.size(),
- "there might be duplicate cluster_id in configuration");
+ CHECK_EQ_MSG(_distinct_cids.size(),
+ _group.size(),
+ "there might be duplicate cluster_id in configuration");
}
~duplication_group_registry() = default;
diff --git a/src/common/fs_manager.cpp b/src/common/fs_manager.cpp
index ec92caed3..e07a84934 100644
--- a/src/common/fs_manager.cpp
+++ b/src/common/fs_manager.cpp
@@ -281,11 +281,11 @@ void fs_manager::remove_replica(const gpid &pid)
unsigned remove_count = 0;
for (auto &n : _dir_nodes) {
unsigned r = n->remove(pid);
- dassert(remove_count + r <= 1,
- "gpid(%d.%d) found in dir(%s), which was removed before",
- pid.get_app_id(),
- pid.get_partition_index(),
- n->tag.c_str());
+ CHECK_LE_MSG(remove_count + r,
+ 1,
+ "gpid({}) found in dir({}), which was removed before",
+ pid,
+ n->tag);
if (r != 0) {
LOG_INFO("%s: remove gpid(%d.%d) from dir(%s)",
dsn_primary_address().to_string(),
diff --git a/src/common/replication_common.cpp b/src/common/replication_common.cpp
index cccb8056c..4f9ca354f 100644
--- a/src/common/replication_common.cpp
+++ b/src/common/replication_common.cpp
@@ -414,10 +414,7 @@ void replication_options::initialize()
void replication_options::sanity_check()
{
- dassert(max_mutation_count_in_prepare_list >= staleness_for_commit,
- "%d VS %d",
- max_mutation_count_in_prepare_list,
- staleness_for_commit);
+ CHECK_GE(max_mutation_count_in_prepare_list, staleness_for_commit);
}
int32_t replication_options::app_mutation_2pc_min_replica_count(int32_t app_max_replica_count) const
@@ -478,11 +475,12 @@ bool replica_helper::load_meta_servers(/*out*/ std::vector<dsn::rpc_address> &se
std::vector<std::string> hostname_port;
uint32_t ip = 0;
utils::split_args(s.c_str(), hostname_port, ':');
- dassert_f(2 == hostname_port.size(),
- "invalid address '{}' specified in config [{}].{}",
- s.c_str(),
- section,
- key);
+ CHECK_EQ_MSG(2,
+ hostname_port.size(),
+ "invalid address '{}' specified in config [{}].{}",
+ s,
+ section,
+ key);
uint32_t port_num = 0;
CHECK(dsn::internal::buf2unsigned(hostname_port[1], port_num) && port_num < UINT16_MAX,
"invalid address '{}' specified in config [{}].{}",
diff --git a/src/common/serialization_helper/thrift_helper.h b/src/common/serialization_helper/thrift_helper.h
index 16b6ede83..2ada8c63a 100644
--- a/src/common/serialization_helper/thrift_helper.h
+++ b/src/common/serialization_helper/thrift_helper.h
@@ -188,8 +188,8 @@ inline uint32_t rpc_address::read(apache::thrift::protocol::TProtocol *iprot)
if (binary_proto != nullptr) {
// the protocol is binary protocol
auto r = iprot->readI64(reinterpret_cast<int64_t &>(_addr.value));
- dassert(_addr.v4.type == HOST_TYPE_INVALID || _addr.v4.type == HOST_TYPE_IPV4,
- "only invalid or ipv4 can be deserialized from binary");
+ CHECK(_addr.v4.type == HOST_TYPE_INVALID || _addr.v4.type == HOST_TYPE_IPV4,
+ "only invalid or ipv4 can be deserialized from binary");
return r;
} else {
// the protocol is json protocol
@@ -247,8 +247,8 @@ inline uint32_t rpc_address::write(apache::thrift::protocol::TProtocol *oprot) c
dynamic_cast<apache::thrift::protocol::TBinaryProtocol *>(oprot);
if (binary_proto != nullptr) {
// the protocol is binary protocol
- dassert(_addr.v4.type == HOST_TYPE_INVALID || _addr.v4.type == HOST_TYPE_IPV4,
- "only invalid or ipv4 can be serialized to binary");
+ CHECK(_addr.v4.type == HOST_TYPE_INVALID || _addr.v4.type == HOST_TYPE_IPV4,
+ "only invalid or ipv4 can be serialized to binary");
return oprot->writeI64((int64_t)_addr.value);
} else {
// the protocol is json protocol
diff --git a/src/common/storage_serverlet.h b/src/common/storage_serverlet.h
index e1d43a62b..51119ad94 100644
--- a/src/common/storage_serverlet.h
+++ b/src/common/storage_serverlet.h
@@ -105,10 +105,10 @@ protected:
dassert(s_handlers.emplace(name, h).second, "handler %s has already been registered", name);
s_vhandlers.resize(rpc_code + 1);
- dassert(s_vhandlers[rpc_code] == nullptr,
- "handler %s(%d) has already been registered",
- rpc_code.to_string(),
- rpc_code.code());
+ CHECK(s_vhandlers[rpc_code] == nullptr,
+ "handler {}({}) has already been registered",
+ rpc_code,
+ rpc_code.code());
s_vhandlers[rpc_code] = h;
return true;
}
diff --git a/src/failure_detector/failure_detector_multimaster.cpp b/src/failure_detector/failure_detector_multimaster.cpp
index 28fbeb4ec..0fae1fb1f 100644
--- a/src/failure_detector/failure_detector_multimaster.cpp
+++ b/src/failure_detector/failure_detector_multimaster.cpp
@@ -85,10 +85,7 @@ void slave_failure_detector_with_multimaster::end_ping(::dsn::error_code err,
if (!failure_detector::end_ping_internal(err, ack))
return;
- dassert(ack.this_node == _meta_servers.group_address()->leader(),
- "ack.this_node[%s] vs meta_servers.leader[%s]",
- ack.this_node.to_string(),
- _meta_servers.group_address()->leader().to_string());
+ CHECK_EQ(ack.this_node, _meta_servers.group_address()->leader());
if (ERR_OK != err) {
rpc_address next = _meta_servers.group_address()->next(ack.this_node);
diff --git a/src/geo/lib/geo_client.cpp b/src/geo/lib/geo_client.cpp
index 8497f4c94..26ed99644 100644
--- a/src/geo/lib/geo_client.cpp
+++ b/src/geo/lib/geo_client.cpp
@@ -81,10 +81,7 @@ geo_client::geo_client(const char *config_file,
_max_level = (int32_t)dsn_config_get_value_uint64(
"geo_client.lib", "max_level", 16, "max cell level for scan");
- dassert_f(_min_level < _max_level,
- "_min_level({}) must be less than _max_level({})",
- _min_level,
- _max_level);
+ CHECK_LT(_min_level, _max_level);
uint32_t latitude_index = (uint32_t)dsn_config_get_value_uint64(
"geo_client.lib", "latitude_index", 5, "latitude index in value");
diff --git a/src/http/http_message_parser.cpp b/src/http/http_message_parser.cpp
index 4b96090a7..11653b4e4 100644
--- a/src/http/http_message_parser.cpp
+++ b/src/http/http_message_parser.cpp
@@ -215,7 +215,7 @@ void http_message_parser::prepare_on_send(message_ex *msg)
int dsn_buf_count = 0;
while (dsn_size > 0 && dsn_buf_count < buffers.size()) {
blob &buf = buffers[dsn_buf_count];
- dassert(dsn_size >= buf.length(), "%u VS %u", dsn_size, buf.length());
+ CHECK_GE(dsn_size, buf.length());
dsn_size -= buf.length();
++dsn_buf_count;
}
diff --git a/src/meta/greedy_load_balancer.cpp b/src/meta/greedy_load_balancer.cpp
index fd6e85696..cc2261bc5 100644
--- a/src/meta/greedy_load_balancer.cpp
+++ b/src/meta/greedy_load_balancer.cpp
@@ -172,8 +172,8 @@ bool greedy_load_balancer::all_replica_infos_collected(const node_state &ns)
void greedy_load_balancer::greedy_balancer(const bool balance_checker)
{
- dassert(t_alive_nodes >= FLAGS_min_live_node_count_for_unfreeze,
- "too few nodes will be freezed");
+ CHECK_GE_MSG(
+ t_alive_nodes, FLAGS_min_live_node_count_for_unfreeze, "too few nodes will be freezed");
for (auto &kv : *(t_global_view->nodes)) {
node_state &ns = kv.second;
diff --git a/src/meta/load_balance_policy.cpp b/src/meta/load_balance_policy.cpp
index a7bce5db8..05c4096b3 100644
--- a/src/meta/load_balance_policy.cpp
+++ b/src/meta/load_balance_policy.cpp
@@ -191,8 +191,9 @@ void load_balance_policy::init(const meta_view *global_view, migration_list *lis
bool load_balance_policy::primary_balance(const std::shared_ptr<app_state> &app,
bool only_move_primary)
{
- dassert(_alive_nodes >= FLAGS_min_live_node_count_for_unfreeze,
- "too few alive nodes will lead to freeze");
+ CHECK_GE_MSG(_alive_nodes,
+ FLAGS_min_live_node_count_for_unfreeze,
+ "too few alive nodes will lead to freeze");
LOG_INFO_F("primary balancer for app({}:{})", app->app_name, app->app_id);
auto graph = ford_fulkerson::builder(app, *_global_view->nodes, address_id).build();
@@ -278,12 +279,13 @@ void load_balance_policy::start_moving_primary(const std::shared_ptr<app_state>
{
std::list<dsn::gpid> potential_moving = calc_potential_moving(app, from, to);
auto potential_moving_size = potential_moving.size();
- dassert_f(plan_moving <= potential_moving_size,
- "from({}) to({}) plan({}), can_move({})",
- from.to_string(),
- to.to_string(),
- plan_moving,
- potential_moving_size);
+ CHECK_LE_MSG(plan_moving,
+ potential_moving_size,
+ "from({}) to({}) plan({}), can_move({})",
+ from,
+ to,
+ plan_moving,
+ potential_moving_size);
while (plan_moving-- > 0) {
dsn::gpid selected = select_moving(potential_moving, prev_load, current_load, from, to);
diff --git a/src/meta/meta_backup_service.cpp b/src/meta/meta_backup_service.cpp
index bcc4673c6..36d5ff1e4 100644
--- a/src/meta/meta_backup_service.cpp
+++ b/src/meta/meta_backup_service.cpp
@@ -63,10 +63,10 @@ void policy_context::start_backup_app_meta_unlocked(int32_t app_id)
_backup_sig.c_str(),
app_id);
auto iter = _progress.unfinished_partitions_per_app.find(app_id);
- dassert(iter != _progress.unfinished_partitions_per_app.end(),
- "%s: can't find app(%d) in unfished_map",
- _backup_sig.c_str(),
- app_id);
+ CHECK(iter != _progress.unfinished_partitions_per_app.end(),
+ "{}: can't find app({}) in unfished_map",
+ _backup_sig,
+ app_id);
_progress.is_app_skipped[app_id] = true;
int total_partitions = iter->second;
for (int32_t pidx = 0; pidx < total_partitions; ++pidx) {
@@ -153,10 +153,10 @@ void policy_context::start_backup_app_meta_unlocked(int32_t app_id)
void policy_context::start_backup_app_partitions_unlocked(int32_t app_id)
{
auto iter = _progress.unfinished_partitions_per_app.find(app_id);
- dassert(iter != _progress.unfinished_partitions_per_app.end(),
- "%s: can't find app(%d) in unfinished apps",
- _backup_sig.c_str(),
- app_id);
+ CHECK(iter != _progress.unfinished_partitions_per_app.end(),
+ "{}: can't find app({}) in unfinished apps",
+ _backup_sig,
+ app_id);
for (int32_t i = 0; i < iter->second; ++i) {
start_backup_partition_unlocked(gpid(app_id, i));
}
@@ -490,28 +490,23 @@ void policy_context::on_backup_reply(error_code err,
pid.to_string(),
primary.to_string());
if (err == dsn::ERR_OK && response.err == dsn::ERR_OK) {
- dassert(response.policy_name == _policy.policy_name,
- "policy name(%s vs %s) don't match, pid(%d.%d), replica_server(%s)",
- _policy.policy_name.c_str(),
- response.policy_name.c_str(),
- pid.get_app_id(),
- pid.get_partition_index(),
- primary.to_string());
- dassert(response.pid == pid,
- "%s: backup pid[(%d.%d) vs (%d.%d)] don't match, replica_server(%s)",
- _policy.policy_name.c_str(),
- response.pid.get_app_id(),
- response.pid.get_partition_index(),
- pid.get_app_id(),
- pid.get_partition_index(),
- primary.to_string());
- dassert(response.backup_id <= _cur_backup.backup_id,
- "%s: replica server(%s) has bigger backup_id(%lld), gpid(%d.%d)",
- _backup_sig.c_str(),
- primary.to_string(),
- response.backup_id,
- pid.get_app_id(),
- pid.get_partition_index());
+ CHECK_EQ_MSG(response.policy_name,
+ _policy.policy_name,
+ "policy names don't match, pid({}), replica_server({})",
+ pid,
+ primary);
+ CHECK_EQ_MSG(response.pid,
+ pid,
+ "{}: backup pids don't match, replica_server({})",
+ _policy.policy_name,
+ primary);
+ CHECK_LE_MSG(response.backup_id,
+ _cur_backup.backup_id,
+ "{}: replica server({}) has bigger backup_id({}), gpid({})",
+ _backup_sig,
+ primary,
+ response.backup_id,
+ pid);
if (response.backup_id < _cur_backup.backup_id) {
LOG_WARNING_F(
@@ -830,11 +825,13 @@ void policy_context::add_backup_history(const backup_info &info)
_policy.policy_name.c_str(),
info.backup_id,
info.start_time_ms);
- dassert(_cur_backup.start_time_ms == 0,
- "%s: shouldn't have multiple unfinished backup instance in a policy, %lld vs %lld",
- _policy.policy_name.c_str(),
- _cur_backup.backup_id,
- info.backup_id);
+
+ CHECK_EQ_MSG(_cur_backup.start_time_ms,
+ 0,
+ "{}: shouldn't have multiple unfinished backup instance in a policy, {} vs {}",
+ _policy.policy_name,
+ _cur_backup.backup_id,
+ info.backup_id);
CHECK(_backup_history.empty() || info.backup_id > _backup_history.rbegin()->first,
"{}: backup_id({}) in history larger than current({})",
_policy.policy_name,
diff --git a/src/meta/meta_data.cpp b/src/meta/meta_data.cpp
index ddff68ef3..5990226b2 100644
--- a/src/meta/meta_data.cpp
+++ b/src/meta/meta_data.cpp
@@ -118,9 +118,8 @@ void maintain_drops(std::vector<rpc_address> &drops, const rpc_address &node, co
drops.erase(it);
}
} else {
- dassert(it == drops.end(),
- "the node(%s) cannot be in drops set before this update",
- node.to_string());
+ CHECK(
+ it == drops.end(), "the node({}) cannot be in drops set before this update", node);
drops.push_back(node);
if (drops.size() > 3) {
drops.erase(drops.begin());
@@ -135,12 +134,8 @@ bool construct_replica(meta_view view, const gpid &pid, int max_replica_count)
partition_configuration &pc = *get_config(*view.apps, pid);
config_context &cc = *get_config_context(*view.apps, pid);
- dassert(replica_count(pc) == 0,
- "replica count of gpid(%d.%d) must be 0",
- pid.get_app_id(),
- pid.get_partition_index());
- dassert(
- max_replica_count > 0, "max replica count is %d, should be at lease 1", max_replica_count);
+ CHECK_EQ_MSG(replica_count(pc), 0, "replica count of gpid({}) must be 0", pid);
+ CHECK_GT(max_replica_count, 0);
std::vector<dropped_replica> &drop_list = cc.dropped;
if (drop_list.empty()) {
@@ -544,11 +539,12 @@ void app_state_helper::reset_manual_compact_status()
bool app_state_helper::get_manual_compact_progress(/*out*/ int32_t &progress) const
{
int32_t total_replica_count = owner->partition_count * owner->max_replica_count;
- dassert_f(total_replica_count > 0,
- "invalid app metadata, app({}), partition_count({}), max_replica_count({})",
- owner->app_name,
- owner->partition_count,
- owner->max_replica_count);
+ CHECK_GT_MSG(total_replica_count,
+ 0,
+ "invalid app metadata, app({}), partition_count({}), max_replica_count({})",
+ owner->app_name,
+ owner->partition_count,
+ owner->max_replica_count);
int32_t finish_count = 0, idle_count = 0;
for (const auto &cc : contexts) {
for (const auto &r : cc.serving) {
@@ -671,11 +667,7 @@ bool node_state::for_each_primary(app_id id, const std::function<bool(const gpid
return true;
}
for (const gpid &pid : *pri) {
- dassert(id == pid.get_app_id(),
- "invalid gpid(%d.%d), app_id must be %d",
- pid.get_app_id(),
- pid.get_partition_index(),
- id);
+ CHECK_EQ_MSG(id, pid.get_app_id(), "invalid gpid({}), app_id must be {}", pid, id);
if (!f(pid))
return false;
}
@@ -689,11 +681,7 @@ bool node_state::for_each_partition(app_id id, const std::function<bool(const gp
return true;
}
for (const gpid &pid : *par) {
- dassert(id == pid.get_app_id(),
- "invalid gpid(%d.%d), app_id must be %d",
- pid.get_app_id(),
- pid.get_partition_index(),
- id);
+ CHECK_EQ_MSG(id, pid.get_app_id(), "invalid gpid({}), app_id must be {}", pid, id);
if (!f(pid))
return false;
}
diff --git a/src/meta/meta_http_service.cpp b/src/meta/meta_http_service.cpp
index 276f6ffed..c473552eb 100644
--- a/src/meta/meta_http_service.cpp
+++ b/src/meta/meta_http_service.cpp
@@ -292,14 +292,8 @@ void meta_http_service::list_app_handler(const http_request &req, http_response
configuration_query_by_index_response response;
request.app_name = info.app_name;
_service->_state->query_configuration_by_index(request, response);
- dassert(info.app_id == response.app_id,
- "invalid app_id, %d VS %d",
- info.app_id,
- response.app_id);
- dassert(info.partition_count == response.partition_count,
- "invalid partition_count, %d VS %d",
- info.partition_count,
- response.partition_count);
+ CHECK_EQ(info.app_id, response.app_id);
+ CHECK_EQ(info.partition_count, response.partition_count);
int fully_healthy = 0;
int write_unhealthy = 0;
int read_unhealthy = 0;
@@ -391,14 +385,8 @@ void meta_http_service::list_node_handler(const http_request &req, http_response
configuration_query_by_index_response response_app;
request_app.app_name = app.app_name;
_service->_state->query_configuration_by_index(request_app, response_app);
- dassert(app.app_id == response_app.app_id,
- "invalid app_id, %d VS %d",
- app.app_id,
- response_app.app_id);
- dassert(app.partition_count == response_app.partition_count,
- "invalid partition_count, %d VS %d",
- app.partition_count,
- response_app.partition_count);
+ CHECK_EQ(app.app_id, response_app.app_id);
+ CHECK_EQ(app.partition_count, response_app.partition_count);
for (int i = 0; i < response_app.partitions.size(); i++) {
const dsn::partition_configuration &p = response_app.partitions[i];
diff --git a/src/meta/meta_server_failure_detector.cpp b/src/meta/meta_server_failure_detector.cpp
index 1fc0e7fc0..6f5f830cf 100644
--- a/src/meta/meta_server_failure_detector.cpp
+++ b/src/meta/meta_server_failure_detector.cpp
@@ -58,7 +58,7 @@ meta_server_failure_detector::meta_server_failure_detector(meta_service *svc)
_lock_svc = dsn::utils::factory_store<dist::distributed_lock_service>::create(
_fd_opts->distributed_lock_service_type.c_str(), PROVIDER_TYPE_MAIN);
error_code err = _lock_svc->initialize(_fd_opts->distributed_lock_service_args);
- dassert(err == ERR_OK, "init distributed_lock_service failed, err = %s", err.to_string());
+ CHECK_EQ_MSG(err, ERR_OK, "init distributed_lock_service failed");
}
meta_server_failure_detector::~meta_server_failure_detector()
@@ -200,10 +200,7 @@ void meta_server_failure_detector::leader_initialize(const std::string &lock_ser
CHECK(addr.from_string_ipv4(lock_service_owner.c_str()),
"parse {} to rpc_address failed",
lock_service_owner);
- dassert(addr == dsn_primary_address(),
- "acquire leader return success, but owner not match: %s vs %s",
- addr.to_string(),
- dsn_primary_address().to_string());
+ CHECK_EQ_MSG(addr, dsn_primary_address(), "acquire leader return success, but owner not match");
_is_leader.store(true);
_election_moment.store(dsn_now_ms());
}
diff --git a/src/meta/meta_split_service.cpp b/src/meta/meta_split_service.cpp
index 13936a6c8..4c2022c35 100644
--- a/src/meta/meta_split_service.cpp
+++ b/src/meta/meta_split_service.cpp
@@ -176,10 +176,11 @@ void meta_split_service::register_child_on_meta(register_child_rpc rpc)
app_name,
child_gpid);
const auto &child_config = app->partitions[child_gpid.get_partition_index()];
- dassert_f(child_config.ballot > 0,
- "app({}) partition({}) should have been registered",
- app_name,
- child_gpid);
+ CHECK_GT_MSG(child_config.ballot,
+ 0,
+ "app({}) partition({}) should have been registered",
+ app_name,
+ child_gpid);
response.err = ERR_CHILD_REGISTERED;
response.parent_config = parent_config;
return;
@@ -267,7 +268,7 @@ void meta_split_service::on_add_child_on_remote_storage_reply(error_code ec,
std::chrono::seconds(delay));
return;
}
- dassert_f(ec == ERR_OK, "we can't handle this right now, err = {}", ec);
+ CHECK_EQ_MSG(ec, ERR_OK, "we can't handle this right now");
LOG_INFO_F("parent({}) resgiter child({}) on remote storage succeed", parent_gpid, child_gpid);
@@ -513,10 +514,7 @@ void meta_split_service::notify_stop_split(notify_stop_split_rpc rpc)
}
// canceling split
- dassert_f(request.partition_count * 2 == app->partition_count,
- "wrong partition_count, request({}) vs meta({})",
- request.partition_count,
- app->partition_count);
+ CHECK_EQ_MSG(request.partition_count * 2, app->partition_count, "wrong partition_count");
app->helpers->split_states.status.erase(request.parent_gpid.get_partition_index());
response.err = ERR_OK;
// when all partitions finish, partition_count should be updated
@@ -563,9 +561,10 @@ void meta_split_service::query_child_state(query_child_state_rpc rpc)
return;
}
- dassert_f(app->partition_count == request.partition_count * 2,
- "app({}) has invalid partition_count",
- app_name);
+ CHECK_EQ_MSG(app->partition_count,
+ request.partition_count * 2,
+ "app({}) has invalid partition_count",
+ app_name);
auto child_pidx = parent_pid.get_partition_index() + request.partition_count;
if (app->partitions[child_pidx].ballot == invalid_ballot) {
diff --git a/src/meta/meta_state_service_simple.cpp b/src/meta/meta_state_service_simple.cpp
index 21a8771dc..9e8a8a408 100644
--- a/src/meta/meta_state_service_simple.cpp
+++ b/src/meta/meta_state_service_simple.cpp
@@ -157,7 +157,7 @@ error_code meta_state_service_simple::delete_node_internal(const std::string &no
auto &node_pair = delete_stack.top();
if (node_pair.node->children.end() == node_pair.next_child_to_delete) {
auto delnum = _quick_map.erase(node_pair.path);
- dassert(delnum == 1, "inconsistent state between quick map and tree");
+ CHECK_EQ_MSG(delnum, 1, "inconsistent state between quick map and tree");
delete node_pair.node;
delete_stack.pop();
} else {
@@ -176,11 +176,11 @@ error_code meta_state_service_simple::delete_node_internal(const std::string &no
}
auto parent_it = _quick_map.find(parent);
- dassert(parent_it != _quick_map.end(), "unable to find parent node");
+ CHECK(parent_it != _quick_map.end(), "unable to find parent node");
// XXX we cannot delete root, right?
auto erase_num = parent_it->second->children.erase(name);
- dassert(erase_num == 1, "inconsistent state between quick map and tree");
+ CHECK_EQ_MSG(erase_num, 1, "inconsistent state between quick map and tree");
return ERR_OK;
}
@@ -218,7 +218,7 @@ error_code meta_state_service_simple::apply_transaction(
default:
CHECK(false, "unsupported operation");
}
- dassert(ec == ERR_OK, "unexpected error when applying, err=%s", ec.to_string());
+ CHECK_EQ_MSG(ec, ERR_OK, "unexpected error when applying");
}
return ERR_OK;
@@ -392,7 +392,7 @@ task_ptr meta_state_service_simple::submit_transaction(
memcpy(dest, entry.data(), entry.length());
dest += entry.length();
});
- dassert(dest - batch.get() == total_size, "memcpy error");
+ CHECK_EQ_MSG(dest - batch.get(), total_size, "memcpy error");
task_ptr task(new error_code_future(cb_code, cb_transaction, 0));
task->set_tracker(tracker);
write_log(blob(batch, total_size),
diff --git a/src/meta/server_load_balancer.cpp b/src/meta/server_load_balancer.cpp
index 3ed7758e3..68b275d13 100644
--- a/src/meta/server_load_balancer.cpp
+++ b/src/meta/server_load_balancer.cpp
@@ -97,13 +97,13 @@ void newly_partitions::newly_add_partition(int32_t app_id)
void newly_partitions::newly_remove_primary(int32_t app_id, bool only_primary)
{
auto iter = primaries.find(app_id);
- dassert(iter != primaries.end(), "invalid app_id, app_id = %d", app_id);
- dassert(iter->second > 0, "invalid primary count, cnt = %d", iter->second);
+ CHECK(iter != primaries.end(), "invalid app_id, app_id = {}", app_id);
+ CHECK_GT_MSG(iter->second, 0, "invalid primary count");
if (0 == (--iter->second)) {
primaries.erase(iter);
}
- dassert(total_primaries > 0, "invalid total primaires = %d", total_primaries);
+ CHECK_GT_MSG(total_primaries, 0, "invalid total primaires");
--total_primaries;
if (!only_primary) {
@@ -114,13 +114,13 @@ void newly_partitions::newly_remove_primary(int32_t app_id, bool only_primary)
void newly_partitions::newly_remove_partition(int32_t app_id)
{
auto iter = partitions.find(app_id);
- dassert(iter != partitions.end(), "invalid app_id, app_id = %d", app_id);
- dassert(iter->second > 0, "invalid partition count, cnt = %d", iter->second);
+ CHECK(iter != partitions.end(), "invalid app_id, app_id = {}", app_id);
+ CHECK_GT_MSG(iter->second, 0, "invalid partition count");
if ((--iter->second) == 0) {
partitions.erase(iter);
}
- dassert(total_partitions > 0, "invalid total partitions = ", total_partitions);
+ CHECK_GT(total_partitions, 0);
--total_partitions;
}
diff --git a/src/meta/server_state.cpp b/src/meta/server_state.cpp
index 607901ce8..4148a037a 100644
--- a/src/meta/server_state.cpp
+++ b/src/meta/server_state.cpp
@@ -319,8 +319,8 @@ error_code server_state::dump_app_states(const char *local_path,
file->append_buffer("binary", 6);
app_state *app;
while ((app = iterator()) != nullptr) {
- dassert(app->status == app_status::AS_AVAILABLE || app->status == app_status::AS_DROPPED,
- "invalid app status");
+ CHECK(app->status == app_status::AS_AVAILABLE || app->status == app_status::AS_DROPPED,
+ "invalid app status");
binary_writer writer;
dsn::marshall(writer, *app, DSF_THRIFT_BINARY);
file->append_buffer(writer.get_buffer());
@@ -391,13 +391,13 @@ error_code server_state::restore_from_local_storage(const char *local_path)
}
blob data;
- dassert(file->read_next_buffer(data) == 1, "read format header fail");
+ CHECK_EQ_MSG(file->read_next_buffer(data), 1, "read format header failed");
_all_apps.clear();
- dassert(memcmp(data.data(), "binary", 6) == 0, "");
+ CHECK_EQ(memcmp(data.data(), "binary", 6), 0);
while (true) {
int ans = file->read_next_buffer(data);
- dassert(ans != -1, "read file failed");
+ CHECK_NE_MSG(ans, -1, "read file failed");
if (ans == 0) // file end
break;
@@ -410,13 +410,14 @@ error_code server_state::restore_from_local_storage(const char *local_path)
for (unsigned int i = 0; i != app->partition_count; ++i) {
ans = file->read_next_buffer(data);
binary_reader reader(data);
- dassert(ans == 1, "unexpect read buffer, ret(%d)", ans);
+ CHECK_EQ_MSG(ans, 1, "unexpect read buffer");
unmarshall(reader, app->partitions[i], DSF_THRIFT_BINARY);
- dassert(app->partitions[i].pid.get_partition_index() == i,
- "uncorrect partition data, gpid(%d.%d), appname(%s)",
- app->app_id,
- i,
- app->app_name.c_str());
+ CHECK_EQ_MSG(app->partitions[i].pid.get_partition_index(),
+ i,
+ "uncorrect partition data, gpid({}.{}), appname({})",
+ app->app_id,
+ i,
+ app->app_name);
}
}
@@ -424,9 +425,7 @@ error_code server_state::restore_from_local_storage(const char *local_path)
if (iter.second->status == app_status::AS_AVAILABLE)
iter.second->status = app_status::AS_CREATING;
else {
- dassert(iter.second->status == app_status::AS_DROPPED,
- "invalid app_status, status = %s",
- enum_to_string(iter.second->status));
+ CHECK_EQ(iter.second->status, app_status::AS_DROPPED);
iter.second->status = app_status::AS_DROPPING;
}
}
@@ -470,9 +469,9 @@ error_code server_state::initialize_default_apps()
std::string envs_str = dsn_config_get_value_string(s, "envs", "", "app envs");
bool parse = dsn::utils::parse_kv_map(envs_str.c_str(), default_app.envs, ',', '=');
- dassert(default_app.app_type.length() > 0, "'[%s] app_type' not specified", s);
- dassert(default_app.partition_count > 0, "'[%s] partition_count' should > 0", s);
- dassert(parse, "'[%s] envs' is invalid, envs = %s", s, envs_str.c_str());
+ CHECK_GT_MSG(default_app.app_type.length(), 0, "'[{}] app_type' not specified", s);
+ CHECK_GT(default_app.partition_count, 0);
+ CHECK(parse, "'[{}] envs' is invalid, envs = {}", s, envs_str);
std::shared_ptr<app_state> app = app_state::create(default_app);
_all_apps.emplace(app->app_id, app);
@@ -493,9 +492,9 @@ error_code server_state::sync_apps_to_remote_storage()
_exist_apps.clear();
for (auto &kv_pair : _all_apps) {
if (kv_pair.second->status == app_status::AS_CREATING) {
- dassert(_exist_apps.find(kv_pair.second->app_name) == _exist_apps.end(),
- "invalid app name, name = %s",
- kv_pair.second->app_name.c_str());
+ CHECK(_exist_apps.find(kv_pair.second->app_name) == _exist_apps.end(),
+ "invalid app name, name = {}",
+ kv_pair.second->app_name);
_exist_apps.emplace(kv_pair.second->app_name, kv_pair.second);
}
}
@@ -524,8 +523,8 @@ error_code server_state::sync_apps_to_remote_storage()
std::shared_ptr<app_state> &app = kv.second;
std::string path = get_app_path(*app);
- dassert(app->status == app_status::AS_CREATING || app->status == app_status::AS_DROPPING,
- "invalid app status");
+ CHECK(app->status == app_status::AS_CREATING || app->status == app_status::AS_DROPPING,
+ "invalid app status");
blob value = app->to_json(app_status::AS_CREATING == app->status ? app_status::AS_AVAILABLE
: app_status::AS_DROPPED);
storage->create_node(path,
@@ -591,9 +590,9 @@ dsn::error_code server_state::sync_apps_from_remote_storage()
partition_configuration pc;
dsn::json::json_forwarder<partition_configuration>::decode(value, pc);
- dassert(pc.pid.get_app_id() == app->app_id &&
- pc.pid.get_partition_index() == partition_id,
- "invalid partition config");
+ CHECK(pc.pid.get_app_id() == app->app_id &&
+ pc.pid.get_partition_index() == partition_id,
+ "invalid partition config");
{
zauto_write_lock l(_lock);
app->partitions[partition_id] = pc;
@@ -655,8 +654,8 @@ dsn::error_code server_state::sync_apps_from_remote_storage()
[this, app_path, &err, &sync_partition](error_code ec, const blob &value) {
if (ec == ERR_OK) {
app_info info;
- dassert(dsn::json::json_forwarder<app_info>::decode(value, info),
- "invalid json data");
+ CHECK(dsn::json::json_forwarder<app_info>::decode(value, info),
+ "invalid json data");
std::shared_ptr<app_state> app = app_state::create(info);
{
zauto_write_lock l(_lock);
@@ -667,10 +666,10 @@ dsn::error_code server_state::sync_apps_from_remote_storage()
} else if (app->status == app_status::AS_DROPPED) {
app->status = app_status::AS_DROPPING;
} else {
- dassert(false,
- "invalid status(%s) for app(%s) in remote storage",
- enum_to_string(app->status),
- app->get_logname());
+ CHECK(false,
+ "invalid status({}) for app({}) in remote storage",
+ enum_to_string(app->status),
+ app->get_logname());
}
}
app->helpers->split_states.splitting_count = 0;
@@ -706,10 +705,10 @@ dsn::error_code server_state::sync_apps_from_remote_storage()
if (ERR_OBJECT_NOT_FOUND == err)
return err;
- dassert(ERR_OK == err, "can't handle this error (%s)", err.to_string());
- dassert(transaction_state == std::string(unlock_state) || transaction_state.empty(),
- "invalid transaction state(%s)",
- transaction_state.c_str());
+ CHECK_EQ_MSG(ERR_OK, err, "can't handle this error");
+ CHECK(transaction_state == std::string(unlock_state) || transaction_state.empty(),
+ "invalid transaction state({})",
+ transaction_state);
storage->get_children(
_apps_root,
@@ -745,7 +744,7 @@ void server_state::initialize_node_state()
ns->put_partition(pc.pid, true);
}
for (auto &ep : pc.secondaries) {
- dassert(!ep.is_invalid(), "invalid secondary address, addr = %s", ep.to_string());
+ CHECK(!ep.is_invalid(), "invalid secondary address, addr = {}", ep);
node_state *ns = get_node_state(_nodes, ep, true);
ns->put_partition(pc.pid, false);
}
@@ -774,9 +773,9 @@ error_code server_state::initialize_data_structure()
}
} else if (err == ERR_OK) {
if (_meta_svc->get_meta_options().recover_from_replica_server) {
- dassert(false,
- "find apps from remote storage, but "
- "[meta_server].recover_from_replica_server = true");
+ CHECK(false,
+ "find apps from remote storage, but "
+ "[meta_server].recover_from_replica_server = true");
} else {
LOG_INFO(
"sync apps from remote storage ok, get %d apps, init the node state accordingly",
@@ -825,7 +824,7 @@ void server_state::on_config_sync(configuration_query_by_node_rpc rpc)
response.partitions.resize(ns->partition_count());
ns->for_each_partition([&, this](const gpid &pid) {
std::shared_ptr<app_state> app = get_app(pid.get_app_id());
- dassert(app != nullptr, "invalid app_id, app_id = %d", pid.get_app_id());
+ CHECK(app, "invalid app_id, app_id = {}", pid.get_app_id());
config_context &cc = app->helpers->contexts[pid.get_partition_index()];
// config sync need the newest data to keep the perfect FD,
@@ -890,11 +889,11 @@ void server_state::on_config_sync(configuration_query_by_node_rpc rpc)
rep.pid);
} else {
// app is not recognized or partition is not recognized
- dassert(false,
- "gpid({}) on node({}) is not exist on meta server, administrator "
- "should check consistency of meta data",
- rep.pid,
- request.node.to_string());
+ CHECK(false,
+ "gpid({}) on node({}) is not exist on meta server, administrator "
+ "should check consistency of meta data",
+ rep.pid,
+ request.node);
}
} else if (app->status == app_status::AS_DROPPED) {
if (app->expire_second == 0) {
@@ -1049,11 +1048,11 @@ void server_state::init_app_partition_node(std::shared_ptr<app_state> &app,
0,
std::chrono::milliseconds(1000));
} else {
- dassert(false,
- "we can't handle this error in init app partition nodes err(%s), gpid(%d.%d)",
- ec.to_string(),
- app->app_id,
- pidx);
+ CHECK(false,
+ "we can't handle this error in init app partition nodes err({}), gpid({}.{})",
+ ec,
+ app->app_id,
+ pidx);
}
};
@@ -1080,7 +1079,7 @@ void server_state::do_app_create(std::shared_ptr<app_state> &app)
0,
std::chrono::seconds(1));
} else {
- dassert(false, "we can't handle this right now, err(%s)", ec.to_string());
+ CHECK(false, "we can't handle this right now, err({})", ec);
}
};
@@ -1204,7 +1203,7 @@ void server_state::do_app_drop(std::shared_ptr<app_state> &app)
0,
std::chrono::seconds(1));
} else {
- dassert(false, "we can't handle this, error(%s)", ec.to_string());
+ CHECK(false, "we can't handle this, error({})", ec);
}
};
@@ -1247,9 +1246,7 @@ void server_state::drop_app(dsn::message_ex *msg)
_meta_svc->get_meta_options().hold_seconds_for_dropped_app;
}
app->helpers->pending_response = msg;
- dassert(app->helpers->partitions_in_progress.load() == 0,
- "partition_in_progress_cnt = %d",
- app->helpers->partitions_in_progress.load());
+ CHECK_EQ(app->helpers->partitions_in_progress.load(), 0);
app->helpers->partitions_in_progress.store(app->partition_count);
break;
@@ -1261,8 +1258,7 @@ void server_state::drop_app(dsn::message_ex *msg)
response.err = ERR_BUSY_DROPPING;
break;
default:
- dassert(
- false, "invalid app status, status = %s", ::dsn::enum_to_string(app->status));
+ CHECK(false, "invalid app status, status = {}", ::dsn::enum_to_string(app->status));
break;
}
}
@@ -1325,9 +1321,7 @@ void server_state::recall_app(dsn::message_ex *msg)
do_recalling = true;
target_app->app_name = new_app_name;
target_app->status = app_status::AS_RECALLING;
- dassert(target_app->helpers->partitions_in_progress.load() == 0,
- "partition_in_progress_cnt = %d",
- target_app->helpers->partitions_in_progress.load());
+ CHECK_EQ(target_app->helpers->partitions_in_progress.load(), 0);
target_app->helpers->partitions_in_progress.store(target_app->partition_count);
target_app->helpers->pending_response = msg;
@@ -1393,54 +1387,39 @@ void server_state::request_check(const partition_configuration &old,
switch (request.type) {
case config_type::CT_ASSIGN_PRIMARY:
- dassert(old.primary != request.node,
- "%s VS %s",
- old.primary.to_string(),
- request.node.to_string());
- dassert(std::find(old.secondaries.begin(), old.secondaries.end(), request.node) ==
- old.secondaries.end(),
- "");
+ CHECK_NE(old.primary, request.node);
+ CHECK(std::find(old.secondaries.begin(), old.secondaries.end(), request.node) ==
+ old.secondaries.end(),
+ "");
break;
case config_type::CT_UPGRADE_TO_PRIMARY:
- dassert(old.primary != request.node,
- "%s VS %s",
- old.primary.to_string(),
- request.node.to_string());
- dassert(std::find(old.secondaries.begin(), old.secondaries.end(), request.node) !=
- old.secondaries.end(),
- "");
+ CHECK_NE(old.primary, request.node);
+ CHECK(std::find(old.secondaries.begin(), old.secondaries.end(), request.node) !=
+ old.secondaries.end(),
+ "");
break;
case config_type::CT_DOWNGRADE_TO_SECONDARY:
- dassert(old.primary == request.node,
- "%s VS %s",
- old.primary.to_string(),
- request.node.to_string());
- dassert(std::find(old.secondaries.begin(), old.secondaries.end(), request.node) ==
- old.secondaries.end(),
- "");
+ CHECK_EQ(old.primary, request.node);
+ CHECK(std::find(old.secondaries.begin(), old.secondaries.end(), request.node) ==
+ old.secondaries.end(),
+ "");
break;
case config_type::CT_DOWNGRADE_TO_INACTIVE:
case config_type::CT_REMOVE:
- dassert(old.primary == request.node ||
- std::find(old.secondaries.begin(), old.secondaries.end(), request.node) !=
- old.secondaries.end(),
- "");
+ CHECK(old.primary == request.node ||
+ std::find(old.secondaries.begin(), old.secondaries.end(), request.node) !=
+ old.secondaries.end(),
+ "");
break;
case config_type::CT_UPGRADE_TO_SECONDARY:
- dassert(old.primary != request.node,
- " %s VS %s",
- old.primary.to_string(),
- request.node.to_string());
- dassert(std::find(old.secondaries.begin(), old.secondaries.end(), request.node) ==
- old.secondaries.end(),
- "");
+ CHECK_NE(old.primary, request.node);
+ CHECK(std::find(old.secondaries.begin(), old.secondaries.end(), request.node) ==
+ old.secondaries.end(),
+ "");
break;
case config_type::CT_PRIMARY_FORCE_UPDATE_BALLOT:
- dassert(old.primary == new_config.primary,
- "%s VS %s",
- old.primary.to_string(),
- new_config.primary.to_string());
- dassert(old.secondaries == new_config.secondaries, "");
+ CHECK_EQ(old.primary, new_config.primary);
+ CHECK(old.secondaries == new_config.secondaries, "");
break;
default:
break;
@@ -1460,18 +1439,15 @@ void server_state::update_configuration_locally(
health_status new_health_status = partition_health_status(new_cfg, min_2pc_count);
if (app.is_stateful) {
- dassert(old_cfg.ballot == invalid_ballot || old_cfg.ballot + 1 == new_cfg.ballot,
- "invalid configuration update request, old ballot %" PRId64 ", new ballot %" PRId64
- "",
- old_cfg.ballot,
- new_cfg.ballot);
+ CHECK(old_cfg.ballot == invalid_ballot || old_cfg.ballot + 1 == new_cfg.ballot,
+ "invalid configuration update request, old ballot {}, new ballot {}",
+ old_cfg.ballot,
+ new_cfg.ballot);
node_state *ns = nullptr;
if (config_request->type != config_type::CT_DROP_PARTITION) {
ns = get_node_state(_nodes, config_request->node, false);
- dassert(ns != nullptr,
- "invalid node address, address = %s",
- config_request->node.to_string());
+ CHECK_NOTNULL(ns, "invalid node address, address = {}", config_request->node);
}
#ifndef NDEBUG
request_check(old_cfg, *config_request);
@@ -1508,7 +1484,7 @@ void server_state::update_configuration_locally(
case config_type::CT_ADD_SECONDARY:
case config_type::CT_ADD_SECONDARY_FOR_LB:
- dassert(false, "invalid execution work flow");
+ CHECK(false, "invalid execution work flow");
break;
case config_type::CT_REGISTER_CHILD: {
ns->put_partition(gpid, true);
@@ -1519,14 +1495,11 @@ void server_state::update_configuration_locally(
break;
}
default:
- dassert(false, "");
+ CHECK(false, "");
break;
}
} else {
- dassert(old_cfg.ballot == new_cfg.ballot,
- "invalid ballot, %" PRId64 " VS %" PRId64 "",
- old_cfg.ballot,
- new_cfg.ballot);
+ CHECK_EQ(old_cfg.ballot, new_cfg.ballot);
new_cfg = old_cfg;
partition_configuration_stateless pcs(new_cfg);
@@ -1543,9 +1516,7 @@ void server_state::update_configuration_locally(
}
auto it = _nodes.find(config_request->host_node);
- dassert(it != _nodes.end(),
- "invalid node address, address = %s",
- config_request->host_node.to_string());
+ CHECK(it != _nodes.end(), "invalid node address, address = {}", config_request->host_node);
if (config_type::CT_REMOVE == config_request->type) {
it->second.remove_partition(gpid, false);
} else {
@@ -1631,8 +1602,8 @@ void server_state::on_update_configuration_on_remote_reply(
config_context &cc = app->helpers->contexts[gpid.get_partition_index()];
// if multiple threads exist in the thread pool, the check may be failed
- dassert(app->status == app_status::AS_AVAILABLE || app->status == app_status::AS_DROPPING,
- "if app removed, this task should be cancelled");
+ CHECK(app->status == app_status::AS_AVAILABLE || app->status == app_status::AS_DROPPING,
+ "if app removed, this task should be cancelled");
if (ec == ERR_TIMEOUT) {
cc.pending_sync_task =
tasking::enqueue(LPC_META_STATE_HIGH,
@@ -1677,7 +1648,7 @@ void server_state::on_update_configuration_on_remote_reply(
}
}
} else {
- dassert(false, "we can't handle this right now, err = %s", ec.to_string());
+ CHECK(false, "we can't handle this right now, err = {}", ec);
}
}
@@ -1695,12 +1666,12 @@ void server_state::recall_partition(std::shared_ptr<app_state> &app, int pidx)
server_state::sStateHash,
std::chrono::seconds(1));
} else {
- dassert(false, "unable to handle this(%s) right now", error.to_string());
+ CHECK(false, "unable to handle this({}) right now", error);
}
};
partition_configuration &pc = app->partitions[pidx];
- dassert((pc.partition_flags & pc_flags::dropped), "");
+ CHECK((pc.partition_flags & pc_flags::dropped), "");
pc.partition_flags = 0;
blob json_partition = dsn::json::json_forwarder<partition_configuration>::encode(pc);
@@ -1732,7 +1703,7 @@ void server_state::drop_partition(std::shared_ptr<app_state> &app, int pidx)
request.config.primary.set_invalid();
request.config.secondaries.clear();
- dassert((pc.partition_flags & pc_flags::dropped) == 0, "");
+ CHECK_EQ((pc.partition_flags & pc_flags::dropped), 0);
request.config.partition_flags |= pc_flags::dropped;
// NOTICE this misunderstanding: if an old state is DDD, we may not need to update the ballot.
@@ -1764,10 +1735,11 @@ void server_state::downgrade_primary_to_inactive(std::shared_ptr<app_state> &app
if (config_status::pending_remote_sync == cc.stage) {
if (cc.pending_sync_request->type == config_type::CT_DROP_PARTITION) {
- dassert(app->status == app_status::AS_DROPPING,
- "app(%s) not in dropping state (%s)",
- app->get_logname(),
- enum_to_string(app->status));
+ CHECK_EQ_MSG(app->status,
+ app_status::AS_DROPPING,
+ "app({}) not in dropping state ({})",
+ app->get_logname(),
+ enum_to_string(app->status));
LOG_WARNING(
"stop downgrade primary as the partitions(%d.%d) is dropping", app->app_id, pidx);
return;
@@ -1806,7 +1778,7 @@ void server_state::downgrade_secondary_to_inactive(std::shared_ptr<app_state> &a
partition_configuration &pc = app->partitions[pidx];
config_context &cc = app->helpers->contexts[pidx];
- dassert(!pc.primary.is_invalid(), "this shouldn't be called if the primary is invalid");
+ CHECK(!pc.primary.is_invalid(), "this shouldn't be called if the primary is invalid");
if (config_status::pending_remote_sync != cc.stage) {
configuration_update_request request;
request.info = *app;
@@ -1844,7 +1816,7 @@ void server_state::downgrade_stateless_nodes(std::shared_ptr<app_state> &app,
break;
}
}
- dassert(!req->node.is_invalid(), "invalid node address, address = %s", req->node.to_string());
+ CHECK(!req->node.is_invalid(), "invalid node address, address = {}", req->node);
// remove host_node & node from secondaries/last_drops, as it will be sync to remote storage
for (++i; i < pc.secondaries.size(); ++i) {
pc.secondaries[i - 1] = pc.secondaries[i];
@@ -1880,8 +1852,8 @@ void server_state::on_update_configuration(
configuration_update_response response;
response.err = ERR_IO_PENDING;
- dassert(app != nullptr, "get get app for app id(%d)", gpid.get_app_id());
- dassert(app->is_stateful, "don't support stateless apps currently, id(%d)", gpid.get_app_id());
+ CHECK(app, "get get app for app id({})", gpid.get_app_id());
+ CHECK(app->is_stateful, "don't support stateless apps currently, id({})", gpid.get_app_id());
auto find_name = _config_type_VALUES_TO_NAMES.find(cfg_request->type);
if (find_name != _config_type_VALUES_TO_NAMES.end()) {
LOG_INFO("recv update config request: type(%s), %s",
@@ -1932,9 +1904,9 @@ void server_state::on_update_configuration(
_meta_svc->reply_data(msg, response);
msg->release_ref();
} else {
- dassert(config_status::not_pending == cc.stage,
- "invalid config status, cc.stage = %s",
- enum_to_string(cc.stage));
+ CHECK(config_status::not_pending == cc.stage,
+ "invalid config status, cc.stage = {}",
+ enum_to_string(cc.stage));
cc.stage = config_status::pending_remote_sync;
cc.pending_sync_request = cfg_request;
cc.msg = msg;
@@ -1961,9 +1933,7 @@ void server_state::on_partition_node_dead(std::shared_ptr<app_state> &app,
pc.pid.get_partition_index(),
address.to_string());
} else {
- dassert(false,
- "no primary/secondary on this node, node address = %s",
- address.to_string());
+ CHECK(false, "no primary/secondary on this node, node address = {}", address);
}
}
} else {
@@ -1985,9 +1955,9 @@ void server_state::on_change_node_state(rpc_address node, bool is_alive)
ns.set_replicas_collect_flag(false);
ns.for_each_partition([&, this](const dsn::gpid &pid) {
std::shared_ptr<app_state> app = get_app(pid.get_app_id());
- dassert(app != nullptr && app->status != app_status::AS_DROPPED,
- "invalid app, app_id = %d",
- pid.get_app_id());
+ CHECK(app != nullptr && app->status != app_status::AS_DROPPED,
+ "invalid app, app_id = {}",
+ pid.get_app_id());
on_partition_node_dead(app, pid.get_partition_index(), node);
return true;
});
@@ -2031,7 +2001,7 @@ server_state::construct_apps(const std::vector<query_app_info_response> &query_a
continue;
for (const app_info &info : query_resp.apps) {
- dassert(info.app_id >= 1, "invalid app_id, app_id = %d", info.app_id);
+ CHECK_GE_MSG(info.app_id, 1, "invalid app id");
auto iter = _all_apps.find(info.app_id);
if (iter == _all_apps.end()) {
std::shared_ptr<app_state> app = app_state::create(info);
@@ -2050,15 +2020,12 @@ server_state::construct_apps(const std::vector<query_app_info_response> &query_a
{
// compatible for app.duplicating different between primary and secondaries in
// 2.1.x, 2.2.x and 2.3.x release
- if (!app_info_compatible_equal(info, *old_info)) {
- dassert(
- false,
- "conflict app info from (%s) for id(%d): new_info(%s), old_info(%s)",
- replica_nodes[i].to_string(),
- info.app_id,
- boost::lexical_cast<std::string>(info).c_str(),
- boost::lexical_cast<std::string>(*old_info).c_str());
- }
+ CHECK(app_info_compatible_equal(info, *old_info),
+ "conflict app info from ({}) for id({}): new_info({}), old_info({})",
+ replica_nodes[i],
+ info.app_id,
+ boost::lexical_cast<std::string>(info),
+ boost::lexical_cast<std::string>(*old_info));
}
}
}
@@ -2092,7 +2059,7 @@ server_state::construct_apps(const std::vector<query_app_info_response> &query_a
// check conflict table name
std::map<std::string, int32_t> checked_names;
for (int app_id = max_app_id; app_id >= 1; --app_id) {
- dassert(_all_apps.find(app_id) != _all_apps.end(), "invalid app_id, app_id = %d", app_id);
+ CHECK(_all_apps.find(app_id) != _all_apps.end(), "invalid app_id, app_id = {}", app_id);
std::shared_ptr<app_state> &app = _all_apps[app_id];
std::string old_name = app->app_name;
while (checked_names.find(app->app_name) != checked_names.end()) {
@@ -2128,7 +2095,7 @@ error_code server_state::construct_partitions(
continue;
for (replica_info &r : query_resp.replicas) {
- dassert(_all_apps.find(r.pid.get_app_id()) != _all_apps.end(), "");
+ CHECK(_all_apps.find(r.pid.get_app_id()) != _all_apps.end(), "");
bool is_accepted = collect_replica({&_all_apps, &_nodes}, replica_nodes[i], r);
if (is_accepted) {
LOG_INFO("accept replica(%s) from node(%s)",
@@ -2146,9 +2113,9 @@ error_code server_state::construct_partitions(
int failed_count = 0;
for (auto &app_kv : _all_apps) {
std::shared_ptr<app_state> &app = app_kv.second;
- dassert(app->status == app_status::AS_CREATING || app->status == app_status::AS_DROPPING,
- "invalid app status, status = %s",
- enum_to_string(app->status));
+ CHECK(app->status == app_status::AS_CREATING || app->status == app_status::AS_DROPPING,
+ "invalid app status, status = {}",
+ enum_to_string(app->status));
if (app->status == app_status::AS_DROPPING) {
LOG_INFO("ignore constructing partitions for dropping app(%d)", app->app_id);
} else {
@@ -2347,12 +2314,11 @@ void server_state::on_start_recovery(const configuration_recovery_request &req,
}
resp.err = sync_apps_to_remote_storage();
- if (resp.err != dsn::ERR_OK) {
- dassert(false,
- "sync apps to remote storage failed when do recovery, err = %s, "
- "need to manually clear things from remote storage and restart the service",
- resp.err.to_string());
- }
+ CHECK_EQ_MSG(resp.err,
+ dsn::ERR_OK,
+ "sync apps to remote storage failed when do recovery, need "
+ "to manually clear contents from remote storage and "
+ "restart the service");
initialize_node_state();
}
@@ -2606,10 +2572,7 @@ void server_state::get_cluster_balance_score(double &primary_stddev, double &tot
void server_state::check_consistency(const dsn::gpid &gpid)
{
auto iter = _all_apps.find(gpid.get_app_id());
- dassert(iter != _all_apps.end(),
- "invalid gpid(%d.%d)",
- gpid.get_app_id(),
- gpid.get_partition_index());
+ CHECK(iter != _all_apps.end(), "invalid gpid({})", gpid);
app_state &app = *(iter->second);
partition_configuration &config = app.partitions[gpid.get_partition_index()];
@@ -2617,44 +2580,30 @@ void server_state::check_consistency(const dsn::gpid &gpid)
if (app.is_stateful) {
if (config.primary.is_invalid() == false) {
auto it = _nodes.find(config.primary);
- dassert(it != _nodes.end(),
- "invalid primary address, address = %s",
- config.primary.to_string());
- dassert(it->second.served_as(gpid) == partition_status::PS_PRIMARY,
- "node should serve as PS_PRIMARY, but status = %s",
- dsn::enum_to_string(it->second.served_as(gpid)));
-
- auto it2 =
- std::find(config.last_drops.begin(), config.last_drops.end(), config.primary);
- dassert(it2 == config.last_drops.end(),
- "primary shouldn't appear in last_drops, address = %s",
- config.primary.to_string());
+ CHECK(it != _nodes.end(), "invalid primary address, address = {}", config.primary);
+ CHECK_EQ(it->second.served_as(gpid), partition_status::PS_PRIMARY);
+ CHECK(std::find(config.last_drops.begin(), config.last_drops.end(), config.primary) ==
+ config.last_drops.end(),
+ "primary shouldn't appear in last_drops, address = {}",
+ config.primary);
}
for (auto &ep : config.secondaries) {
auto it = _nodes.find(ep);
- dassert(it != _nodes.end(), "invalid secondary address, address = %s", ep.to_string());
- dassert(it->second.served_as(gpid) == partition_status::PS_SECONDARY,
- "node should serve as PS_SECONDARY, but status = %s",
- dsn::enum_to_string(it->second.served_as(gpid)));
-
- auto it2 = std::find(config.last_drops.begin(), config.last_drops.end(), ep);
- dassert(it2 == config.last_drops.end(),
- "secondary shouldn't appear in last_drops, address = %s",
- ep.to_string());
+ CHECK(it != _nodes.end(), "invalid secondary address, address = {}", ep);
+ CHECK_EQ(it->second.served_as(gpid), partition_status::PS_SECONDARY);
+ CHECK(std::find(config.last_drops.begin(), config.last_drops.end(), ep) ==
+ config.last_drops.end(),
+ "secondary shouldn't appear in last_drops, address = {}",
+ ep);
}
} else {
partition_configuration_stateless pcs(config);
- dassert(pcs.hosts().size() == pcs.workers().size(),
- "%d VS %d",
- pcs.hosts().size(),
- pcs.workers().size());
+ CHECK_EQ(pcs.hosts().size(), pcs.workers().size());
for (auto &ep : pcs.hosts()) {
auto it = _nodes.find(ep);
- dassert(it != _nodes.end(), "invalid host, address = %s", ep.to_string());
- dassert(it->second.served_as(gpid) == partition_status::PS_SECONDARY,
- "node should serve as PS_SECONDARY, but status = %s",
- dsn::enum_to_string(it->second.served_as(gpid)));
+ CHECK(it != _nodes.end(), "invalid host, address = {}", ep);
+ CHECK_EQ(it->second.served_as(gpid), partition_status::PS_SECONDARY);
}
}
}
@@ -2693,7 +2642,7 @@ void server_state::do_update_app_info(const std::string &app_path,
0,
std::chrono::seconds(1));
} else {
- dassert(false, "we can't handle this, error(%s)", ec.to_string());
+ CHECK(false, "we can't handle this, error({})", ec);
}
};
// TODO(cailiuyang): callback scheduling order may be undefined if multiple requests are
@@ -2751,8 +2700,7 @@ void server_state::set_app_envs(const app_env_rpc &env_rpc)
ainfo.envs[keys[idx]] = values[idx];
}
do_update_app_info(app_path, ainfo, [this, app_name, keys, values, env_rpc](error_code ec) {
- dassert(
- ec == ERR_OK, "update app_info to remote storage failed with err = %s", ec.to_string());
+ CHECK_EQ_MSG(ec, ERR_OK, "update app info to remote storage failed");
zauto_write_lock l(_lock);
std::shared_ptr<app_state> app = get_app(app_name);
@@ -2824,8 +2772,7 @@ void server_state::del_app_envs(const app_env_rpc &env_rpc)
}
do_update_app_info(app_path, ainfo, [this, app_name, keys, env_rpc](error_code ec) {
- dassert(
- ec == ERR_OK, "update app_info to remote storage failed with err = %s", ec.to_string());
+ CHECK_EQ_MSG(ec, ERR_OK, "update app info to remote storage failed");
zauto_write_lock l(_lock);
std::shared_ptr<app_state> app = get_app(app_name);
@@ -2917,9 +2864,7 @@ void server_state::clear_app_envs(const app_env_rpc &env_rpc)
do_update_app_info(
app_path, ainfo, [this, app_name, prefix, erase_keys, env_rpc](error_code ec) {
- dassert(ec == ERR_OK,
- "update app_info to remote storage failed with err = %s",
- ec.to_string());
+ CHECK_EQ_MSG(ec, ERR_OK, "update app info to remote storage failed");
zauto_write_lock l(_lock);
std::shared_ptr<app_state> app = get_app(app_name);
@@ -3102,7 +3047,7 @@ void server_state::update_compaction_envs_on_remote_storage(start_manual_compact
ainfo.envs[keys[idx]] = values[idx];
}
do_update_app_info(app_path, ainfo, [this, app_name, keys, values, rpc](error_code ec) {
- dassert_f(ec == ERR_OK, "update app_info to remote storage failed with err = {}", ec);
+ CHECK_EQ_MSG(ec, ERR_OK, "update app_info to remote storage failed");
zauto_write_lock l(_lock);
auto app = get_app(app_name);
@@ -3374,16 +3319,17 @@ void server_state::set_max_replica_count_env_updating(std::shared_ptr<app_state>
zauto_write_lock l(_lock);
- dassert_f(ec == ERR_OK,
- "An error that can't be handled occurs while updating remote env of "
- "max_replica_count: error_code={}, app_name={}, app_id={}, "
- "new_max_replica_count={}, {}={}",
- ec.to_string(),
- app->app_name,
- app->app_id,
- new_max_replica_count,
- replica_envs::UPDATE_MAX_REPLICA_COUNT,
- app->envs[replica_envs::UPDATE_MAX_REPLICA_COUNT]);
+ CHECK_EQ_MSG(ec,
+ ERR_OK,
+ "An error that can't be handled occurs while updating remote env of "
+ "max_replica_count: error_code={}, app_name={}, app_id={}, "
+ "new_max_replica_count={}, {}={}",
+ ec,
+ app->app_name,
+ app->app_id,
+ new_max_replica_count,
+ replica_envs::UPDATE_MAX_REPLICA_COUNT,
+ app->envs[replica_envs::UPDATE_MAX_REPLICA_COUNT]);
app->envs[replica_envs::UPDATE_MAX_REPLICA_COUNT] =
fmt::format("updating;{}", new_max_replica_count);
@@ -3420,16 +3366,17 @@ void server_state::do_update_max_replica_count(std::shared_ptr<app_state> &app,
results->at(partition_index) = ec;
auto uncompleted = --app->helpers->partitions_in_progress;
- dassert_f(uncompleted >= 0,
- "the uncompleted number should be >= 0 while updating partition-level"
- "max_replica_count: uncompleted={}, app_name={}, app_id={}, "
- "partition_index={}, partition_count={}, new_max_replica_count={}",
- uncompleted,
- app_name,
- app->app_id,
- partition_index,
- app->partition_count,
- new_max_replica_count);
+ CHECK_GE_MSG(uncompleted,
+ 0,
+ "the uncompleted number should be >= 0 while updating partition-level"
+ "max_replica_count: uncompleted={}, app_name={}, app_id={}, "
+ "partition_index={}, partition_count={}, new_max_replica_count={}",
+ uncompleted,
+ app_name,
+ app->app_id,
+ partition_index,
+ app->partition_count,
+ new_max_replica_count);
if (uncompleted > 0) {
return;
@@ -3499,28 +3446,30 @@ void server_state::update_app_max_replica_count(std::shared_ptr<app_state> &app,
zauto_write_lock l(_lock);
- dassert_f(ec == ERR_OK,
- "An error that can't be handled occurs while updating remote app-level "
- "max_replica_count: error_code={}, app_name={}, app_id={}, "
- "old_max_replica_count={}, new_max_replica_count={}, {}={}",
- ec.to_string(),
- app->app_name,
- app->app_id,
- old_max_replica_count,
- new_max_replica_count,
- replica_envs::UPDATE_MAX_REPLICA_COUNT,
- app->envs[replica_envs::UPDATE_MAX_REPLICA_COUNT]);
-
- dassert_f(old_max_replica_count == app->max_replica_count,
- "app-level max_replica_count has been updated to remote storage, however "
- "old_max_replica_count from response is not consistent with current local "
- "max_replica_count: app_name={}, app_id={}, old_max_replica_count={}, "
- "local_max_replica_count={}, new_max_replica_count={}",
- app->app_name,
- app->app_id,
- old_max_replica_count,
- app->max_replica_count,
- new_max_replica_count);
+ CHECK_EQ_MSG(ec,
+ ERR_OK,
+ "An error that can't be handled occurs while updating remote app-level "
+ "max_replica_count: error_code={}, app_name={}, app_id={}, "
+ "old_max_replica_count={}, new_max_replica_count={}, {}={}",
+ ec,
+ app->app_name,
+ app->app_id,
+ old_max_replica_count,
+ new_max_replica_count,
+ replica_envs::UPDATE_MAX_REPLICA_COUNT,
+ app->envs[replica_envs::UPDATE_MAX_REPLICA_COUNT]);
+
+ CHECK_EQ_MSG(old_max_replica_count,
+ app->max_replica_count,
+ "app-level max_replica_count has been updated to remote storage, however "
+ "old_max_replica_count from response is not consistent with current local "
+ "max_replica_count: app_name={}, app_id={}, old_max_replica_count={}, "
+ "local_max_replica_count={}, new_max_replica_count={}",
+ app->app_name,
+ app->app_id,
+ old_max_replica_count,
+ app->max_replica_count,
+ new_max_replica_count);
app->max_replica_count = new_max_replica_count;
app->envs.erase(replica_envs::UPDATE_MAX_REPLICA_COUNT);
@@ -3543,14 +3492,15 @@ void server_state::update_partition_max_replica_count(std::shared_ptr<app_state>
int32_t new_max_replica_count,
partition_callback on_partition_updated)
{
- dassert_f(partition_index < app->partition_count,
- "partition_index should be < partition_count: app_name={}, app_id={}, "
- "partition_index={}, partition_count={}, new_max_replica_count={}",
- app->app_name,
- app->app_id,
- partition_index,
- app->partition_count,
- new_max_replica_count);
+ CHECK_LT_MSG(partition_index,
+ app->partition_count,
+ "partition_index should be < partition_count: app_name={}, app_id={}, "
+ "partition_index={}, partition_count={}, new_max_replica_count={}",
+ app->app_name,
+ app->app_id,
+ partition_index,
+ app->partition_count,
+ new_max_replica_count);
const auto &old_partition_config = app->partitions[partition_index];
const auto old_max_replica_count = old_partition_config.max_replica_count;
@@ -3587,14 +3537,14 @@ void server_state::update_partition_max_replica_count(std::shared_ptr<app_state>
return;
}
- dassert_f(context.stage == config_status::not_pending,
- "invalid config status while updating max_replica_count: context.stage={}, "
- "app_name={}, app_id={}, partition_index={}, new_max_replica_count={}",
- enum_to_string(context.stage),
- app->app_name,
- app->app_id,
- partition_index,
- new_max_replica_count);
+ CHECK(context.stage == config_status::not_pending,
+ "invalid config status while updating max_replica_count: context.stage={}, "
+ "app_name={}, app_id={}, partition_index={}, new_max_replica_count={}",
+ enum_to_string(context.stage),
+ app->app_name,
+ app->app_id,
+ partition_index,
+ new_max_replica_count);
context.stage = config_status::pending_remote_sync;
context.pending_sync_request.reset();
@@ -3747,17 +3697,18 @@ void server_state::update_partition_max_replica_count_locally(
const auto old_max_replica_count = old_partition_config.max_replica_count;
const auto old_ballot = old_partition_config.ballot;
- dassert_f(old_ballot + 1 == new_ballot,
- "invalid ballot while updating local max_replica_count: app_name={}, app_id={}, "
- "partition_id={}, old_max_replica_count={}, new_max_replica_count={}, "
- "old_ballot={}, new_ballot={}",
- app->app_name,
- app->app_id,
- partition_index,
- old_max_replica_count,
- new_max_replica_count,
- old_ballot,
- new_ballot);
+ CHECK_EQ_MSG(old_ballot + 1,
+ new_ballot,
+ "invalid ballot while updating local max_replica_count: app_name={}, app_id={}, "
+ "partition_id={}, old_max_replica_count={}, new_max_replica_count={}, "
+ "old_ballot={}, new_ballot={}",
+ app->app_name,
+ app->app_id,
+ partition_index,
+ old_max_replica_count,
+ new_max_replica_count,
+ old_ballot,
+ new_ballot);
std::string old_config_str(boost::lexical_cast<std::string>(old_partition_config));
std::string new_config_str(boost::lexical_cast<std::string>(new_partition_config));
@@ -3873,29 +3824,31 @@ void server_state::recover_all_partitions_max_replica_count(std::shared_ptr<app_
std::string old_pc_str(boost::lexical_cast<std::string>(old_pc));
std::string new_pc_str(boost::lexical_cast<std::string>(new_pc));
- dassert_f(ec == ERR_OK,
- "An error that can't be handled occurs while recovering remote "
- "partition-level max_replica_count: error_code={}, app_name={}, "
- "app_id={}, partition_index={}, partition_count={}, "
- "old_partition_config={}, new_partition_config={}",
- ec.to_string(),
- app->app_name,
- app->app_id,
- i,
- app->partition_count,
- old_pc_str,
- new_pc_str);
-
- dassert_f(old_pc.ballot + 1 == new_pc.ballot,
- "invalid ballot while recovering max_replica_count: app_name={}, "
- "app_id={}, partition_index={}, partition_count={}, "
- "old_partition_config={}, new_partition_config={}",
- app->app_name,
- app->app_id,
- i,
- app->partition_count,
- old_pc_str,
- new_pc_str);
+ CHECK_EQ_MSG(ec,
+ ERR_OK,
+ "An error that can't be handled occurs while recovering remote "
+ "partition-level max_replica_count: error_code={}, app_name={}, "
+ "app_id={}, partition_index={}, partition_count={}, "
+ "old_partition_config={}, new_partition_config={}",
+ ec,
+ app->app_name,
+ app->app_id,
+ i,
+ app->partition_count,
+ old_pc_str,
+ new_pc_str);
+
+ CHECK_EQ_MSG(old_pc.ballot + 1,
+ new_pc.ballot,
+ "invalid ballot while recovering max_replica_count: app_name={}, "
+ "app_id={}, partition_index={}, partition_count={}, "
+ "old_partition_config={}, new_partition_config={}",
+ app->app_name,
+ app->app_id,
+ i,
+ app->partition_count,
+ old_pc_str,
+ new_pc_str);
old_pc = new_pc;
@@ -3942,15 +3895,16 @@ void server_state::recover_app_max_replica_count(std::shared_ptr<app_state> &app
zauto_write_lock l(_lock);
auto old_max_replica_count = app->max_replica_count;
- dassert_f(ec == ERR_OK,
- "An error that can't be handled occurs while recovering remote "
- "app-level max_replica_count: error_code={}, app_name={}, app_id={}, "
- "old_max_replica_count={}, new_max_replica_count={}",
- ec.to_string(),
- app->app_name,
- app->app_id,
- old_max_replica_count,
- new_max_replica_count);
+ CHECK_EQ_MSG(ec,
+ ERR_OK,
+ "An error that can't be handled occurs while recovering remote "
+ "app-level max_replica_count: error_code={}, app_name={}, app_id={}, "
+ "old_max_replica_count={}, new_max_replica_count={}",
+ ec,
+ app->app_name,
+ app->app_id,
+ old_max_replica_count,
+ new_max_replica_count);
app->max_replica_count = new_max_replica_count;
app->envs.erase(replica_envs::UPDATE_MAX_REPLICA_COUNT);
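A note on the conversion pattern running through server_state.cpp above: dassert/dassert_f sites become CHECK, CHECK_NOTNULL, or CHECK_<cmp>[_MSG] calls, messages switch from printf conversions to fmt-style {} placeholders, and the two-operand comparison forms appear to report both operands themselves, which is why hand-written "x VS y" messages are simply dropped. A minimal sketch of the three shapes, assuming only the macros and the utils/fmt_logging.h header shown in this patch; the function and variables below are made up for illustration:

    #include "utils/fmt_logging.h"

    #include <cstdint>

    // Illustrative only; not part of the patch.
    void check_macro_sketch(int64_t old_ballot, int64_t new_ballot, dsn::error_code ec)
    {
        // Two-operand form: both values are printed on failure, no message needed.
        CHECK_EQ(old_ballot, new_ballot);
        // *_MSG form: attach extra context, formatted with fmt-style placeholders.
        CHECK_EQ_MSG(ec, dsn::ERR_OK, "update failed, ballot = {}", new_ballot);
        // Plain CHECK: arbitrary predicate plus a formatted message.
        CHECK(new_ballot >= 0, "invalid ballot {}", new_ballot);
    }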
diff --git a/src/meta/server_state_restore.cpp b/src/meta/server_state_restore.cpp
index d48c3f52d..e6c9586d4 100644
--- a/src/meta/server_state_restore.cpp
+++ b/src/meta/server_state_restore.cpp
@@ -96,11 +96,8 @@ std::pair<dsn::error_code, std::shared_ptr<app_state>> server_state::restore_app
}
int32_t old_app_id = info.app_id;
std::string old_app_name = info.app_name;
- dassert(old_app_id == req.app_id, "invalid app_id, %d VS %d", old_app_id, req.app_id);
- dassert(old_app_name == req.app_name,
- "invalid app_name, %s VS %s",
- old_app_name.c_str(),
- req.app_name.c_str());
+ CHECK_EQ_MSG(old_app_id, req.app_id, "invalid app id");
+ CHECK_EQ_MSG(old_app_name, req.app_name, "invalid app name");
std::shared_ptr<app_state> app = nullptr;
if (!req.new_app_name.empty()) {
diff --git a/src/meta/test/meta_app_operation_test.cpp b/src/meta/test/meta_app_operation_test.cpp
index b320e46ef..094200d67 100644
--- a/src/meta/test/meta_app_operation_test.cpp
+++ b/src/meta/test/meta_app_operation_test.cpp
@@ -405,10 +405,7 @@ TEST_F(meta_app_operation_test, create_app)
set_min_live_node_count_for_unfreeze(test.min_live_node_count_for_unfreeze);
- dassert_f(total_node_count >= test.alive_node_count,
- "total_node_count({}) should be >= alive_node_count({})",
- total_node_count,
- test.alive_node_count);
+ CHECK_GE(total_node_count, test.alive_node_count);
for (int i = 0; i < total_node_count - test.alive_node_count; i++) {
_ms->set_node_state({nodes[i]}, false);
}
@@ -749,10 +746,7 @@ TEST_F(meta_app_operation_test, set_max_replica_count)
FLAGS_max_allowed_replica_count = test.max_allowed_replica_count;
// set some nodes unalive to match the expected number of alive nodes
- dassert_f(total_node_count >= test.alive_node_count,
- "total_node_count({}) should be >= alive_node_count({})",
- total_node_count,
- test.alive_node_count);
+ CHECK_GE(total_node_count, test.alive_node_count);
for (int i = 0; i < total_node_count - test.alive_node_count; i++) {
_ms->set_node_state({nodes[i]}, false);
}
diff --git a/src/meta/test/meta_state/meta_state_service.cpp b/src/meta/test/meta_state/meta_state_service.cpp
index 4bea8abc1..d6196a6b2 100644
--- a/src/meta/test/meta_state/meta_state_service.cpp
+++ b/src/meta/test/meta_state/meta_state_service.cpp
@@ -105,7 +105,7 @@ void provider_basic_test(const service_creator_func &service_creator,
dsn::binary_reader reader(value);
int read_value = 0;
reader.read(read_value);
- dassert(read_value == 0xdeadbeef, "get_value != create_value");
+ CHECK_EQ(read_value, 0xdeadbeef);
})
->wait();
writer = dsn::binary_writer();
@@ -122,7 +122,7 @@ void provider_basic_test(const service_creator_func &service_creator,
dsn::binary_reader reader(value);
int read_value = 0;
reader.read(read_value);
- dassert(read_value == 0xbeefdead, "get_value != create_value");
+ CHECK_EQ(read_value, 0xbeefdead);
})
->wait();
}
diff --git a/src/meta/test/meta_test_base.cpp b/src/meta/test/meta_test_base.cpp
index 438c8e824..cbc5aebc2 100644
--- a/src/meta/test/meta_test_base.cpp
+++ b/src/meta/test/meta_test_base.cpp
@@ -122,10 +122,11 @@ std::vector<rpc_address> meta_test_base::ensure_enough_alive_nodes(int min_node_
std::vector<dsn::rpc_address> nodes(get_alive_nodes());
if (!nodes.empty()) {
auto node_count = static_cast<int>(nodes.size());
- dassert_f(node_count >= min_node_count,
- "there should be at least {} alive nodes, now we just have {} alive nodes",
- min_node_count,
- node_count);
+ CHECK_GE_MSG(node_count,
+ min_node_count,
+ "there should be at least {} alive nodes, now we just have {} alive nodes",
+ min_node_count,
+ node_count);
LOG_DEBUG_F("already exists {} alive nodes: ", nodes.size());
for (const auto &node : nodes) {
diff --git a/src/nfs/nfs_client_impl.cpp b/src/nfs/nfs_client_impl.cpp
index 98eebde13..304783d3c 100644
--- a/src/nfs/nfs_client_impl.cpp
+++ b/src/nfs/nfs_client_impl.cpp
@@ -200,7 +200,8 @@ void nfs_client_impl::end_get_file_size(::dsn::error_code err,
req_offset += req_size;
size -= req_size;
if (size <= 0) {
- dassert(size == 0, "last request must read exactly the remaing size of the file");
+ CHECK_EQ_MSG(
+ size, 0, "last request must read exactly the remaing size of the file");
break;
}
diff --git a/src/nfs/nfs_client_impl.h b/src/nfs/nfs_client_impl.h
index 6d26b4e20..81f9f8a40 100644
--- a/src/nfs/nfs_client_impl.h
+++ b/src/nfs/nfs_client_impl.h
@@ -93,7 +93,7 @@ public:
{
if (file_handle != nullptr) {
auto err = file::close(file_handle);
- dassert(err == ERR_OK, "file::close failed, err = %s", err.to_string());
+ CHECK_EQ_MSG(err, ERR_OK, "file::close failed");
}
}
};
diff --git a/src/nfs/nfs_server_impl.h b/src/nfs/nfs_server_impl.h
index 49d8f91b5..987104c15 100644
--- a/src/nfs/nfs_server_impl.h
+++ b/src/nfs/nfs_server_impl.h
@@ -113,7 +113,7 @@ private:
~file_handle_info_on_server()
{
error_code err = file::close(file_handle);
- dassert(err == ERR_OK, "file::close failed, err = %s", err.to_string());
+ CHECK_EQ_MSG(err, ERR_OK, "file::close failed");
}
};
diff --git a/src/perf_counter/perf_counter_atomic.h b/src/perf_counter/perf_counter_atomic.h
index 6e9284809..90328fb1a 100644
--- a/src/perf_counter/perf_counter_atomic.h
+++ b/src/perf_counter/perf_counter_atomic.h
@@ -275,7 +275,7 @@ public:
virtual int get_latest_samples(int required_sample_count,
/*out*/ samples_t &samples) const override
{
- dassert(required_sample_count <= MAX_QUEUE_LENGTH, "");
+ CHECK_LE(required_sample_count, MAX_QUEUE_LENGTH);
uint64_t count = _tail.load();
int return_count = count >= (uint64_t)required_sample_count ? required_sample_count : count;
diff --git a/src/perf_counter/perf_counters.cpp b/src/perf_counter/perf_counters.cpp
index 1dae74622..32a815217 100644
--- a/src/perf_counter/perf_counters.cpp
+++ b/src/perf_counter/perf_counters.cpp
@@ -131,11 +131,10 @@ perf_counter_ptr perf_counters::get_global_counter(const char *app,
_counters.emplace(full_name, counter_object{counter, 1});
return counter;
} else {
- dassert(it->second.counter->type() == flags,
- "counters with the same name %s with differnt types, (%d) vs (%d)",
- full_name.c_str(),
- it->second.counter->type(),
- flags);
+ CHECK_EQ_MSG(it->second.counter->type(),
+ flags,
+ "counters with the same name {} with differnt types",
+ full_name);
++it->second.user_reference;
return it->second.counter;
}
diff --git a/src/redis_protocol/proxy_lib/proxy_layer.cpp b/src/redis_protocol/proxy_lib/proxy_layer.cpp
index 3172eff3f..d3af6009b 100644
--- a/src/redis_protocol/proxy_lib/proxy_layer.cpp
+++ b/src/redis_protocol/proxy_lib/proxy_layer.cpp
@@ -110,9 +110,7 @@ proxy_session::proxy_session(proxy_stub *op, dsn::message_ex *first_msg)
_backup_one_request->add_ref();
_remote_address = _backup_one_request->header->from_address;
- dassert(_remote_address.type() == HOST_TYPE_IPV4,
- "invalid rpc_address type, type = %d",
- (int)_remote_address.type());
+ CHECK_EQ_MSG(_remote_address.type(), HOST_TYPE_IPV4, "invalid rpc_address type");
}
proxy_session::~proxy_session()
diff --git a/src/redis_protocol/proxy_lib/redis_parser.cpp b/src/redis_protocol/proxy_lib/redis_parser.cpp
index ab2f7a2b7..280c7eea6 100644
--- a/src/redis_protocol/proxy_lib/redis_parser.cpp
+++ b/src/redis_protocol/proxy_lib/redis_parser.cpp
@@ -934,7 +934,7 @@ void redis_parser::decr_by(message_entry &entry) { counter_internal(entry); }
void redis_parser::counter_internal(message_entry &entry)
{
CHECK(!entry.request.sub_requests.empty(), "");
- dassert(entry.request.sub_requests[0].length > 0, "");
+ CHECK_GT(entry.request.sub_requests[0].length, 0);
const char *command = entry.request.sub_requests[0].data.data();
int64_t increment = 1;
if (strcasecmp(command, "INCR") == 0 || strcasecmp(command, "DECR") == 0) {
@@ -1327,9 +1327,7 @@ void redis_parser::handle_command(std::unique_ptr<message_entry> &&entry)
e.sequence_id);
enqueue_pending_response(std::move(entry));
- dassert(request.sub_request_count > 0,
- "invalid request, request.length = %d",
- request.sub_request_count);
+ CHECK_GT_MSG(request.sub_request_count, 0, "invalid request");
::dsn::blob &command = request.sub_requests[0].data;
redis_call_handler handler = redis_parser::get_handler(command.data(), command.length());
handler(this, e);
diff --git a/src/replica/backup/cold_backup_context.cpp b/src/replica/backup/cold_backup_context.cpp
index 902fef38c..bf75e8121 100644
--- a/src/replica/backup/cold_backup_context.cpp
+++ b/src/replica/backup/cold_backup_context.cpp
@@ -1064,7 +1064,7 @@ void cold_backup_context::file_upload_uncomplete(const std::string &filename)
{
zauto_lock l(_lock);
- dassert(_cur_upload_file_cnt >= 1, "cur_upload_file_cnt = %d", _cur_upload_file_cnt);
+ CHECK_GE(_cur_upload_file_cnt, 1);
_cur_upload_file_cnt -= 1;
_file_remain_cnt += 1;
_file_status[filename] = file_status::FileUploadUncomplete;
@@ -1074,7 +1074,7 @@ void cold_backup_context::file_upload_complete(const std::string &filename)
{
zauto_lock l(_lock);
- dassert(_cur_upload_file_cnt >= 1, "cur_upload_file_cnt = %d", _cur_upload_file_cnt);
+ CHECK_GE(_cur_upload_file_cnt, 1);
_cur_upload_file_cnt -= 1;
_file_status[filename] = file_status::FileUploadComplete;
}
diff --git a/src/replica/backup/cold_backup_context.h b/src/replica/backup/cold_backup_context.h
index ae06d647b..c7026f8ad 100644
--- a/src/replica/backup/cold_backup_context.h
+++ b/src/replica/backup/cold_backup_context.h
@@ -17,11 +17,11 @@
#pragma once
-#include "utils/zlocks.h"
-#include "common/json_helper.h"
#include "block_service/block_service.h"
-
#include "common/backup_common.h"
+#include "common/json_helper.h"
+#include "utils/fmt_logging.h"
+#include "utils/zlocks.h"
class replication_service_test_app;
@@ -225,9 +225,9 @@ public:
// Progress should be in range of [0, 1000].
void update_progress(int progress)
{
- dassert(progress >= 0 && progress <= cold_backup_constant::PROGRESS_FINISHED,
- "invalid progress %d",
- progress);
+ CHECK(progress >= 0 && progress <= cold_backup_constant::PROGRESS_FINISHED,
+ "invalid progress {}",
+ progress);
_progress.store(progress);
}
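The include change above is the reason the header's include list is touched at all: once update_progress asserts with CHECK directly, the header must pull in utils/fmt_logging.h itself rather than relying on its includers. A minimal sketch of a header relying on the macro; the file and function names here are hypothetical, only the include path and the CHECK usage come from the patch:

    // progress_util.h -- hypothetical header, illustrative only
    #pragma once

    #include "utils/fmt_logging.h" // required because CHECK is used in this header

    inline void set_progress_checked(int &slot, int progress, int finished)
    {
        // Same range-check shape as update_progress above.
        CHECK(progress >= 0 && progress <= finished, "invalid progress {}", progress);
        slot = progress;
    }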
diff --git a/src/replica/duplication/replica_duplicator.cpp b/src/replica/duplication/replica_duplicator.cpp
index ca8fe819d..4d49f144a 100644
--- a/src/replica/duplication/replica_duplicator.cpp
+++ b/src/replica/duplication/replica_duplicator.cpp
@@ -215,13 +215,14 @@ void replica_duplicator::verify_start_decree(decree start_decree)
decree confirmed_decree = progress().confirmed_decree;
decree last_decree = progress().last_decree;
decree max_gced_decree = get_max_gced_decree();
- dassert_f(max_gced_decree < start_decree,
- "the logs haven't yet duplicated were accidentally truncated "
- "[max_gced_decree: {}, start_decree: {}, confirmed_decree: {}, last_decree: {}]",
- max_gced_decree,
- start_decree,
- confirmed_decree,
- last_decree);
+ CHECK_LT_MSG(max_gced_decree,
+ start_decree,
+ "the logs haven't yet duplicated were accidentally truncated "
+ "[max_gced_decree: {}, start_decree: {}, confirmed_decree: {}, last_decree: {}]",
+ max_gced_decree,
+ start_decree,
+ confirmed_decree,
+ last_decree);
}
decree replica_duplicator::get_max_gced_decree() const
diff --git a/src/replica/log_file.cpp b/src/replica/log_file.cpp
index 1cabb01e6..bb6ff584b 100644
--- a/src/replica/log_file.cpp
+++ b/src/replica/log_file.cpp
@@ -179,7 +179,7 @@ void log_file::close()
_stream.reset(nullptr);
if (_handle) {
error_code err = file::close(_handle);
- dassert(err == ERR_OK, "file::close failed, err = %s", err.to_string());
+ CHECK_EQ_MSG(err, ERR_OK, "file::close failed");
_handle = nullptr;
}
@@ -192,7 +192,7 @@ void log_file::flush() const
if (_handle) {
error_code err = file::flush(_handle);
- dassert(err == ERR_OK, "file::flush failed, err = %s", err.to_string());
+ CHECK_EQ_MSG(err, ERR_OK, "file::flush failed");
}
}
@@ -278,7 +278,7 @@ aio_task_ptr log_file::commit_log_blocks(log_appender &pending,
int64_t local_offset = block.start_offset() - start_offset();
auto hdr = reinterpret_cast<log_block_header *>(const_cast<char *>(block.front().data()));
- dassert(hdr->magic == 0xdeadbeef, "");
+ CHECK_EQ(hdr->magic, 0xdeadbeef);
hdr->local_offset = local_offset;
hdr->length = static_cast<int32_t>(block.size() - sizeof(log_block_header));
hdr->body_crc = _crc32;
diff --git a/src/replica/log_file_stream.h b/src/replica/log_file_stream.h
index cb9c6d19e..059f271cd 100644
--- a/src/replica/log_file_stream.h
+++ b/src/replica/log_file_stream.h
@@ -178,7 +178,7 @@ private:
_task->wait();
_have_ongoing_task = false;
_end += _task->get_transferred_size();
- dassert(_end <= block_size_bytes, "invalid io_size.");
+ CHECK_LE_MSG(_end, block_size_bytes, "invalid io_size");
return _task->error();
} else {
return ERR_OK;
diff --git a/src/replica/mutation.cpp b/src/replica/mutation.cpp
index aec7a37bf..b54555582 100644
--- a/src/replica/mutation.cpp
+++ b/src/replica/mutation.cpp
@@ -172,7 +172,7 @@ void mutation::add_client_request(task_code code, dsn::message_ex *request)
client_requests.push_back(request);
- dassert(client_requests.size() == data.updates.size(), "size must be equal");
+ CHECK_EQ(client_requests.size(), data.updates.size());
}
void mutation::write_to(const std::function<void(const blob &)> &inserter) const
diff --git a/src/replica/mutation_cache.cpp b/src/replica/mutation_cache.cpp
index 09a7d4b9b..47277b50f 100644
--- a/src/replica/mutation_cache.cpp
+++ b/src/replica/mutation_cache.cpp
@@ -78,10 +78,7 @@ error_code mutation_cache::put(mutation_ptr &mu)
int idx = ((decree - _end_decree) + _end_idx + _max_count) % _max_count;
mutation_ptr &old = _array[idx];
if (old != nullptr) {
- dassert(old->data.header.ballot <= mu->data.header.ballot,
- "%" PRId64 " VS %" PRId64 "",
- old->data.header.ballot,
- mu->data.header.ballot);
+ CHECK_LE(old->data.header.ballot, mu->data.header.ballot);
}
_array[idx] = mu;
diff --git a/src/replica/mutation_log.cpp b/src/replica/mutation_log.cpp
index 947cb3d10..7530648c9 100644
--- a/src/replica/mutation_log.cpp
+++ b/src/replica/mutation_log.cpp
@@ -113,10 +113,10 @@ void mutation_log_shared::flush_internal(int max_count)
void mutation_log_shared::write_pending_mutations(bool release_lock_required)
{
- dassert(release_lock_required, "lock must be hold at this point");
- dassert(!_is_writing.load(std::memory_order_relaxed), "");
- dassert(_pending_write != nullptr, "");
- dassert(_pending_write->size() > 0, "pending write size = %d", (int)_pending_write->size());
+ CHECK(release_lock_required, "lock must be held at this point");
+ CHECK(!_is_writing.load(std::memory_order_relaxed), "");
+ CHECK_NOTNULL(_pending_write, "");
+ CHECK_GT(_pending_write->size(), 0);
auto pr = mark_new_offset(_pending_write->size(), false);
CHECK_EQ(pr.second, _pending_write->start_offset());
@@ -143,7 +143,7 @@ void mutation_log_shared::commit_pending_mutations(log_file_ptr &lf,
LPC_WRITE_REPLICATION_LOG_SHARED,
&_tracker,
[this, lf, pending](error_code err, size_t sz) mutable {
- dassert(_is_writing.load(std::memory_order_relaxed), "");
+ CHECK(_is_writing.load(std::memory_order_relaxed), "");
if (utils::FLAGS_enable_latency_tracer) {
for (auto &mu : pending->mutations()) {
@@ -153,7 +153,7 @@ void mutation_log_shared::commit_pending_mutations(log_file_ptr &lf,
for (auto &block : pending->all_blocks()) {
auto hdr = (log_block_header *)block.front().data();
- dassert(hdr->magic == 0xdeadbeef, "header magic is changed: 0x%x", hdr->magic);
+ CHECK_EQ(hdr->magic, 0xdeadbeef);
}
if (err == ERR_OK) {
@@ -367,10 +367,10 @@ void mutation_log_private::init_states()
void mutation_log_private::write_pending_mutations(bool release_lock_required)
{
- dassert(release_lock_required, "lock must be hold at this point");
- dassert(!_is_writing.load(std::memory_order_relaxed), "");
- dassert(_pending_write != nullptr, "");
- dassert(_pending_write->size() > 0, "pending write size = %d", (int)_pending_write->size());
+ CHECK(release_lock_required, "lock must be held at this point");
+ CHECK(!_is_writing.load(std::memory_order_relaxed), "");
+ CHECK_NOTNULL(_pending_write, "");
+ CHECK_GT(_pending_write->size(), 0);
auto pr = mark_new_offset(_pending_write->size(), false);
CHECK_EQ_PREFIX(pr.second, _pending_write->start_offset());
@@ -401,63 +401,64 @@ void mutation_log_private::commit_pending_mutations(log_file_ptr &lf,
}
}
- lf->commit_log_blocks(
- *pending,
- LPC_WRITE_REPLICATION_LOG_PRIVATE,
- &_tracker,
- [this, lf, pending, max_commit](error_code err, size_t sz) mutable {
- dassert(_is_writing.load(std::memory_order_relaxed), "");
-
- for (auto &block : pending->all_blocks()) {
- auto hdr = (log_block_header *)block.front().data();
- dassert(hdr->magic == 0xdeadbeef, "header magic is changed: 0x%x", hdr->magic);
- }
-
- if (dsn_unlikely(utils::FLAGS_enable_latency_tracer)) {
- for (const auto &mu : pending->mutations()) {
- ADD_CUSTOM_POINT(mu->_tracer, "commit_pending_completed");
- }
- }
-
- // notify the callbacks
- // ATTENTION: callback may be called before this code block executed done.
- for (auto &c : pending->callbacks()) {
- c->enqueue(err, sz);
- }
-
- if (err != ERR_OK) {
- LOG_ERROR("write private log failed, err = %s", err.to_string());
- _is_writing.store(false, std::memory_order_relaxed);
- if (_io_error_callback) {
- _io_error_callback(err);
- }
- return;
- }
- CHECK_EQ(sz, pending->size());
-
- // flush to ensure that there is no gap between private log and in-memory buffer
- // so that we can get all mutations in learning process.
- //
- // FIXME : the file could have been closed
- if (FLAGS_plog_force_flush) {
- lf->flush();
- }
-
- // update _private_max_commit_on_disk after written into log file done
- update_max_commit_on_disk(max_commit);
-
- _is_writing.store(false, std::memory_order_relaxed);
-
- // start to write if possible
- _plock.lock();
-
- if (!_is_writing.load(std::memory_order_acquire) && _pending_write) {
- write_pending_mutations(true);
- } else {
- _plock.unlock();
- }
- },
- get_gpid().thread_hash());
+ lf->commit_log_blocks(*pending,
+ LPC_WRITE_REPLICATION_LOG_PRIVATE,
+ &_tracker,
+ [this, lf, pending, max_commit](error_code err, size_t sz) mutable {
+ CHECK(_is_writing.load(std::memory_order_relaxed), "");
+
+ for (auto &block : pending->all_blocks()) {
+ auto hdr = (log_block_header *)block.front().data();
+ CHECK_EQ(hdr->magic, 0xdeadbeef);
+ }
+
+ if (dsn_unlikely(utils::FLAGS_enable_latency_tracer)) {
+ for (const auto &mu : pending->mutations()) {
+ ADD_CUSTOM_POINT(mu->_tracer, "commit_pending_completed");
+ }
+ }
+
+ // notify the callbacks
+ // ATTENTION: callback may be called before this code block finishes
+ // executing.
+ for (auto &c : pending->callbacks()) {
+ c->enqueue(err, sz);
+ }
+
+ if (err != ERR_OK) {
+ LOG_ERROR("write private log failed, err = %s", err.to_string());
+ _is_writing.store(false, std::memory_order_relaxed);
+ if (_io_error_callback) {
+ _io_error_callback(err);
+ }
+ return;
+ }
+ CHECK_EQ(sz, pending->size());
+
+ // flush to ensure that there is no gap between private log and
+ // in-memory buffer
+ // so that we can get all mutations in learning process.
+ //
+ // FIXME : the file could have been closed
+ if (FLAGS_plog_force_flush) {
+ lf->flush();
+ }
+
+ // update _private_max_commit_on_disk after written into log file done
+ update_max_commit_on_disk(max_commit);
+
+ _is_writing.store(false, std::memory_order_relaxed);
+
+ // start to write if possible
+ _plock.lock();
+
+ if (!_is_writing.load(std::memory_order_acquire) && _pending_write) {
+ write_pending_mutations(true);
+ } else {
+ _plock.unlock();
+ }
+ },
+ get_gpid().thread_hash());
}
///////////////////////////////////////////////////////////////
@@ -472,12 +473,7 @@ mutation_log::mutation_log(const std::string &dir, int32_t max_log_file_mb, gpid
_private_gpid = gpid;
if (r) {
- dassert(_private_gpid == r->get_gpid(),
- "(%d.%d) VS (%d.%d)",
- _private_gpid.get_app_id(),
- _private_gpid.get_partition_index(),
- r->get_gpid().get_app_id(),
- r->get_gpid().get_partition_index());
+ CHECK_EQ(_private_gpid, r->get_gpid());
}
mutation_log::init_states();
}
@@ -512,8 +508,8 @@ error_code mutation_log::open(replay_callback read_callback,
io_failure_callback write_error_callback,
const std::map<gpid, decree> &replay_condition)
{
- dassert(!_is_opened, "cannot open a opened mutation_log");
- dassert(nullptr == _current_log_file, "the current log file must be null at this point");
+ CHECK(!_is_opened, "cannot open an already opened mutation_log");
+ CHECK(nullptr == _current_log_file, "");
// create dir if necessary
if (!dsn::utils::filesystem::path_exists(_dir)) {
@@ -534,7 +530,7 @@ error_code mutation_log::open(replay_callback read_callback,
}
if (nullptr == read_callback) {
- dassert(file_list.size() == 0, "log must be empty if callback is not present");
+ CHECK(file_list.empty(), "");
}
std::sort(file_list.begin(), file_list.end());
@@ -571,9 +567,9 @@ error_code mutation_log::open(replay_callback read_callback,
log->end_offset() - log->start_offset());
}
- dassert(_log_files.find(log->index()) == _log_files.end(),
- "invalid log_index, index = %d",
- log->index());
+ CHECK(_log_files.find(log->index()) == _log_files.end(),
+ "invalid log_index, index = {}",
+ log->index());
_log_files[log->index()] = log;
}
@@ -585,10 +581,7 @@ error_code mutation_log::open(replay_callback read_callback,
if (!replay_condition.empty()) {
if (_is_private) {
auto find = replay_condition.find(_private_gpid);
- dassert(find != replay_condition.end(),
- "invalid gpid(%d.%d)",
- _private_gpid.get_app_id(),
- _private_gpid.get_partition_index());
+ CHECK(find != replay_condition.end(), "invalid gpid({})", _private_gpid);
for (auto it = _log_files.begin(); it != _log_files.end(); ++it) {
if (it->second->previous_log_max_decree(_private_gpid) <= find->second) {
// previous logs can be ignored
@@ -641,13 +634,13 @@ error_code mutation_log::open(replay_callback read_callback,
if (mark_it != _log_files.rend()) {
// set replay_begin to the next position of mark_it.
replay_begin = _log_files.find(mark_it->first);
- dassert(replay_begin != _log_files.end(),
- "invalid log_index, index = %d",
- mark_it->first);
+ CHECK(replay_begin != _log_files.end(),
+ "invalid log_index, index = {}",
+ mark_it->first);
replay_begin++;
- dassert(replay_begin != _log_files.end(),
- "invalid log_index, index = %d",
- mark_it->first);
+ CHECK(replay_begin != _log_files.end(),
+ "invalid log_index, index = {}",
+ mark_it->first);
}
}
@@ -737,23 +730,17 @@ error_code mutation_log::create_new_log_file()
LOG_ERROR("cannot create log file with index %d", _last_file_index + 1);
return ERR_FILE_OPERATION_FAILED;
}
- dassert(logf->end_offset() == logf->start_offset(),
- "%" PRId64 " VS %" PRId64 "",
- logf->end_offset(),
- logf->start_offset());
- dassert(_global_end_offset == logf->end_offset(),
- "%" PRId64 " VS %" PRId64 "",
- _global_end_offset,
- logf->start_offset());
+ CHECK_EQ(logf->end_offset(), logf->start_offset());
+ CHECK_EQ(_global_end_offset, logf->end_offset());
LOG_INFO("create new log file %s succeed, time_used = %" PRIu64 " ns",
logf->path().c_str(),
dsn_now_ns() - start);
// update states
_last_file_index++;
- dassert(_log_files.find(_last_file_index) == _log_files.end(),
- "invalid log_offset, offset = %d",
- _last_file_index);
+ CHECK(_log_files.find(_last_file_index) == _log_files.end(),
+ "invalid log_offset, offset = {}",
+ _last_file_index);
_log_files[_last_file_index] = logf;
// switch the current log file
@@ -788,23 +775,18 @@ error_code mutation_log::create_new_log_file()
"write mutation log file header failed, file = %s, err = %s",
logf->path().c_str(),
err.to_string());
- if (_io_error_callback) {
- _io_error_callback(err);
- } else {
- dassert(false, "unhandled error");
- }
+ CHECK(_io_error_callback, "");
+ _io_error_callback(err);
}
},
0);
- dassert(_global_end_offset ==
- _current_log_file->start_offset() + sizeof(log_block_header) + header_len,
- "%" PRId64 " VS %" PRId64 "(%" PRId64 " + %d + %d)",
- _global_end_offset,
- _current_log_file->start_offset() + sizeof(log_block_header) + header_len,
- _current_log_file->start_offset(),
- (int)sizeof(log_block_header),
- (int)header_len);
+ CHECK_EQ_MSG(_global_end_offset,
+ _current_log_file->start_offset() + sizeof(log_block_header) + header_len,
+ "current log file start offset: {}, log_block_header size: {}, header_len: {}",
+ _current_log_file->start_offset(),
+ sizeof(log_block_header),
+ header_len);
return ERR_OK;
}
@@ -842,15 +824,15 @@ std::pair<log_file_ptr, int64_t> mutation_log::mark_new_offset(size_t size,
if (create_file) {
auto ec = create_new_log_file();
- dassert_f(ec == ERR_OK,
- "{} create new log file failed: {}",
- _is_private ? _private_gpid.to_string() : "",
- ec);
+ CHECK_EQ_MSG(ec,
+ ERR_OK,
+ "{} create new log file failed",
+ _is_private ? _private_gpid.to_string() : "");
_switch_file_hint = false;
_switch_file_demand = false;
}
} else {
- dassert(_current_log_file != nullptr, "");
+ CHECK_NOTNULL(_current_log_file, "");
}
int64_t write_start_offset = _global_end_offset;
@@ -863,7 +845,7 @@ decree mutation_log::max_decree(gpid gpid) const
{
zauto_lock l(_lock);
if (_is_private) {
- dassert(gpid == _private_gpid, "replica gpid does not match");
+ CHECK_EQ(gpid, _private_gpid);
return _private_log_info.max_decree;
} else {
auto it = _shared_log_info_map.find(gpid);
@@ -877,7 +859,7 @@ decree mutation_log::max_decree(gpid gpid) const
decree mutation_log::max_commit_on_disk() const
{
zauto_lock l(_lock);
- dassert(_is_private, "this method is only valid for private logs");
+ CHECK(_is_private, "this method is only valid for private logs");
return _private_max_commit_on_disk;
}
@@ -889,7 +871,7 @@ decree mutation_log::max_gced_decree(gpid gpid) const
decree mutation_log::max_gced_decree_no_lock(gpid gpid) const
{
- dassert(_is_private, "");
+ CHECK(_is_private, "");
decree result = invalid_decree;
for (auto &log : _log_files) {
@@ -909,17 +891,11 @@ void mutation_log::check_valid_start_offset(gpid gpid, int64_t valid_start_offse
{
zauto_lock l(_lock);
if (_is_private) {
- dassert(valid_start_offset == _private_log_info.valid_start_offset,
- "valid start offset mismatch: %" PRId64 " vs %" PRId64,
- valid_start_offset,
- _private_log_info.valid_start_offset);
+ CHECK_EQ(valid_start_offset, _private_log_info.valid_start_offset);
} else {
auto it = _shared_log_info_map.find(gpid);
if (it != _shared_log_info_map.end()) {
- dassert(valid_start_offset == it->second.valid_start_offset,
- "valid start offset mismatch: %" PRId64 " vs %" PRId64,
- valid_start_offset,
- it->second.valid_start_offset);
+ CHECK_EQ(valid_start_offset, it->second.valid_start_offset);
}
}
}
@@ -1009,7 +985,7 @@ void mutation_log::set_valid_start_offset_on_open(gpid gpid, int64_t valid_start
{
zauto_lock l(_lock);
if (_is_private) {
- dassert(gpid == _private_gpid, "replica gpid does not match");
+ CHECK_EQ(gpid, _private_gpid);
_private_log_info.valid_start_offset = valid_start_offset;
} else {
_shared_log_info_map[gpid] = replica_log_info(0, valid_start_offset);
@@ -1020,7 +996,7 @@ int64_t mutation_log::on_partition_reset(gpid gpid, decree max_decree)
{
zauto_lock l(_lock);
if (_is_private) {
- dassert(_private_gpid == gpid, "replica gpid does not match");
+ CHECK_EQ(_private_gpid, gpid);
replica_log_info old_info = _private_log_info;
_private_log_info.max_decree = max_decree;
_private_log_info.valid_start_offset = _global_end_offset;
@@ -1052,7 +1028,7 @@ int64_t mutation_log::on_partition_reset(gpid gpid, decree max_decree)
void mutation_log::on_partition_removed(gpid gpid)
{
- dassert(!_is_private, "this method is only valid for shared logs");
+ CHECK(!_is_private, "this method is only valid for shared logs");
zauto_lock l(_lock);
_shared_log_info_map.erase(gpid);
}
@@ -1072,7 +1048,7 @@ void mutation_log::update_max_decree_no_lock(gpid gpid, decree d)
it->second.max_decree = d;
}
} else {
- dassert(false, "replica has not been registered in the log before");
+ CHECK(false, "replica has not been registered in the log before");
}
} else {
CHECK_EQ(gpid, _private_gpid);
@@ -1090,7 +1066,7 @@ void mutation_log::update_max_commit_on_disk(decree d)
void mutation_log::update_max_commit_on_disk_no_lock(decree d)
{
- dassert(_is_private, "this method is only valid for private logs");
+ CHECK(_is_private, "this method is only valid for private logs");
if (d > _private_max_commit_on_disk) {
_private_max_commit_on_disk = d;
}
@@ -1098,13 +1074,8 @@ void mutation_log::update_max_commit_on_disk_no_lock(decree d)
bool mutation_log::get_learn_state(gpid gpid, decree start, /*out*/ learn_state &state) const
{
- dassert(_is_private, "this method is only valid for private logs");
- dassert(_private_gpid == gpid,
- "replica gpid does not match, (%d.%d) VS (%d.%d)",
- _private_gpid.get_app_id(),
- _private_gpid.get_partition_index(),
- gpid.get_app_id(),
- gpid.get_partition_index());
+ CHECK(_is_private, "this method is only valid for private logs");
+ CHECK_EQ(_private_gpid, gpid);
binary_writer temp_writer;
if (get_learn_state_in_memory(start, temp_writer)) {
@@ -1208,7 +1179,7 @@ void mutation_log::get_parent_mutations_and_logs(gpid pid,
std::vector<std::string> &files,
uint64_t &total_file_size) const
{
- dassert(_is_private, "this method is only valid for private logs");
+ CHECK(_is_private, "this method is only valid for private logs");
CHECK_EQ(_private_gpid, pid);
mutation_list.clear();
@@ -1304,7 +1275,7 @@ int mutation_log::garbage_collection(gpid gpid,
int64_t reserve_max_size,
int64_t reserve_max_time)
{
- dassert(_is_private, "this method is only valid for private log");
+ CHECK(_is_private, "this method is only valid for private log");
std::map<int, log_file_ptr> files;
decree max_decree = invalid_decree;
@@ -1323,9 +1294,9 @@ int mutation_log::garbage_collection(gpid gpid,
return 0;
} else {
// the last one should be the current log file
- dassert(current_file_index == -1 || files.rbegin()->first == current_file_index,
- "invalid current_file_index, index = %d",
- current_file_index);
+ CHECK(current_file_index == -1 || files.rbegin()->first == current_file_index,
+ "invalid current_file_index, index = {}",
+ current_file_index);
}
// find the largest file which can be deleted.
@@ -1334,7 +1305,7 @@ int mutation_log::garbage_collection(gpid gpid,
int64_t already_reserved_size = 0;
for (mark_it = files.rbegin(); mark_it != files.rend(); ++mark_it) {
log_file_ptr log = mark_it->second;
- dassert(mark_it->first == log->index(), "%d VS %d", mark_it->first, log->index());
+ CHECK_EQ(mark_it->first, log->index());
// currently, "max_decree" is the max decree covered by this log.
// reserve current file
@@ -1375,7 +1346,7 @@ int mutation_log::garbage_collection(gpid gpid,
// update max decree for the next log file
auto &max_decrees = log->previous_log_max_decrees();
auto it3 = max_decrees.find(gpid);
- dassert(it3 != max_decrees.end(), "impossible for private logs");
+ CHECK(it3 != max_decrees.end(), "impossible for private logs");
max_decree = it3->second.max_decree;
already_reserved_size += log->end_offset() - log->start_offset();
}
@@ -1427,7 +1398,7 @@ int mutation_log::garbage_collection(const replica_log_info_map &gc_condition,
int file_count_limit,
std::set<gpid> &prevent_gc_replicas)
{
- dassert(!_is_private, "this method is only valid for shared log");
+ CHECK(!_is_private, "this method is only valid for shared log");
std::map<int, log_file_ptr> files;
replica_log_info_map max_decrees;
@@ -1454,9 +1425,9 @@ int mutation_log::garbage_collection(const replica_log_info_map &gc_condition,
return (int)files.size();
} else {
// the last one should be the current log file
- dassert(-1 == current_log_index || files.rbegin()->first == current_log_index,
- "invalid current_log_index, index = %d",
- current_log_index);
+ CHECK(-1 == current_log_index || files.rbegin()->first == current_log_index,
+ "invalid current_log_index, index = {}",
+ current_log_index);
}
int reserved_log_count = files.size();
@@ -1476,7 +1447,7 @@ int mutation_log::garbage_collection(const replica_log_info_map &gc_condition,
int file_count = 0;
for (mark_it = files.rbegin(); mark_it != files.rend(); ++mark_it) {
log_file_ptr log = mark_it->second;
- dassert(mark_it->first == log->index(), "%d VS %d", mark_it->first, log->index());
+ CHECK_EQ(mark_it->first, log->index());
file_count++;
bool delete_ok = true;
@@ -1507,9 +1478,8 @@ int mutation_log::garbage_collection(const replica_log_info_map &gc_condition,
if (it3 == max_decrees.end()) {
// valid_start_offset may be reset to 0 if initialize_on_load() returns
// ERR_INCOMPLETE_DATA
- dassert(
- valid_start_offset == 0 || valid_start_offset >= log->end_offset(),
- "valid start offset must be 0 or greater than the end of this log file");
+ CHECK(valid_start_offset == 0 || valid_start_offset >= log->end_offset(),
+ "valid start offset must be 0 or greater than the end of this log file");
LOG_DEBUG("gc @ %d.%d: max_decree for %s is missing vs %" PRId64
" as garbage max decree,"
@@ -1651,7 +1621,7 @@ int mutation_log::garbage_collection(const replica_log_info_map &gc_condition,
for (auto it = files.begin(); it != files.end() && it->second->index() <= largest_log_to_delete;
++it) {
log_file_ptr log = it->second;
- dassert(it->first == log->index(), "%d VS %d", it->first, log->index());
+ CHECK_EQ(it->first, log->index());
to_delete_log_count++;
to_delete_log_size += log->end_offset() - log->start_offset();
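One recurring shape in the mutation_log changes above is worth isolating: write_pending_mutations validates its entry conditions with a mix of CHECK, CHECK_NOTNULL, and CHECK_GT rather than four bare dasserts. A minimal, self-contained sketch of that precondition style, using made-up arguments; only the macros and their empty-message second argument come from the patch:

    #include "utils/fmt_logging.h"

    #include <memory>
    #include <vector>

    // Illustrative only; mirrors the precondition block of write_pending_mutations.
    void check_write_preconditions(bool lock_held,
                                   bool is_writing,
                                   const std::shared_ptr<std::vector<char>> &pending)
    {
        CHECK(lock_held, "lock must be held at this point");
        CHECK(!is_writing, "");      // an empty message is allowed
        CHECK_NOTNULL(pending,ature""); // dedicated macro for null checks
        CHECK_GT(pending->size(), 0); // the offending value is printed if this fires
    }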
diff --git a/src/replica/mutation_log_replay.cpp b/src/replica/mutation_log_replay.cpp
index cc1552cd8..2203707dc 100644
--- a/src/replica/mutation_log_replay.cpp
+++ b/src/replica/mutation_log_replay.cpp
@@ -191,10 +191,7 @@ namespace replication {
if (err == ERR_OK || err == ERR_HANDLE_EOF) {
// the log may still be written when used for learning
- dassert(g_end_offset <= end_offset,
- "make sure the global end offset is correct: %" PRId64 " vs %" PRId64,
- g_end_offset,
- end_offset);
+ CHECK_LE(g_end_offset, end_offset);
err = ERR_OK;
} else if (err == ERR_INCOMPLETE_DATA) {
// ignore the last incomplete block
diff --git a/src/replica/prepare_list.cpp b/src/replica/prepare_list.cpp
index 6f7a67cc1..9eb1300d8 100644
--- a/src/replica/prepare_list.cpp
+++ b/src/replica/prepare_list.cpp
@@ -109,7 +109,7 @@ error_code prepare_list::prepare(mutation_ptr &mu,
// err = mutation_cache::put(mu);
// if (err == ERR_CAPACITY_EXCEEDED)
// {
- // dassert(mu->data.header.last_committed_decree >= min_decree(), "");
+ // CHECK_GE(mu->data.header.last_committed_decree, min_decree());
// commit (min_decree(), true);
// pop_min();
// }
diff --git a/src/replica/replica.cpp b/src/replica/replica.cpp
index d5c9cd28d..1bd14a12a 100644
--- a/src/replica/replica.cpp
+++ b/src/replica/replica.cpp
@@ -279,14 +279,8 @@ void replica::response_client_write(dsn::message_ex *request, error_code error)
void replica::check_state_completeness()
{
/* prepare commit durable */
- dassert(max_prepared_decree() >= last_committed_decree(),
- "%" PRId64 " VS %" PRId64 "",
- max_prepared_decree(),
- last_committed_decree());
- dassert(last_committed_decree() >= last_durable_decree(),
- "%" PRId64 " VS %" PRId64 "",
- last_committed_decree(),
- last_durable_decree());
+ CHECK_GE(max_prepared_decree(), last_committed_decree());
+ CHECK_GE(last_committed_decree(), last_durable_decree());
}
void replica::execute_mutation(mutation_ptr &mu)
@@ -314,20 +308,14 @@ void replica::execute_mutation(mutation_ptr &mu)
case partition_status::PS_PRIMARY: {
ADD_POINT(mu->_tracer);
check_state_completeness();
- dassert(_app->last_committed_decree() + 1 == d,
- "app commit: %" PRId64 ", mutation decree: %" PRId64 "",
- _app->last_committed_decree(),
- d);
+ CHECK_EQ(_app->last_committed_decree() + 1, d);
err = _app->apply_mutation(mu);
} break;
case partition_status::PS_SECONDARY:
if (!_secondary_states.checkpoint_is_running) {
check_state_completeness();
- dassert(_app->last_committed_decree() + 1 == d,
- "%" PRId64 " VS %" PRId64 "",
- _app->last_committed_decree() + 1,
- d);
+ CHECK_EQ(_app->last_committed_decree() + 1, d);
err = _app->apply_mutation(mu);
} else {
LOG_DEBUG("%s: mutation %s commit to %s skipped, app.last_committed_decree = %" PRId64,
@@ -345,10 +333,7 @@ void replica::execute_mutation(mutation_ptr &mu)
if (_potential_secondary_states.learning_status == learner_status::LearningSucceeded ||
_potential_secondary_states.learning_status ==
learner_status::LearningWithPrepareTransient) {
- dassert(_app->last_committed_decree() + 1 == d,
- "%" PRId64 " VS %" PRId64 "",
- _app->last_committed_decree() + 1,
- d);
+ CHECK_EQ(_app->last_committed_decree() + 1, d);
err = _app->apply_mutation(mu);
} else {
LOG_DEBUG("%s: mutation %s commit to %s skipped, app.last_committed_decree = %" PRId64,
diff --git a/src/replica/replica_2pc.cpp b/src/replica/replica_2pc.cpp
index 9e22148db..ce6d9284b 100644
--- a/src/replica/replica_2pc.cpp
+++ b/src/replica/replica_2pc.cpp
@@ -163,9 +163,7 @@ void replica::on_client_write(dsn::message_ex *request, bool ignore_throttling)
void replica::init_prepare(mutation_ptr &mu, bool reconciliation, bool pop_all_committed_mutations)
{
- dassert(partition_status::PS_PRIMARY == status(),
- "invalid partition_status, status = %s",
- enum_to_string(status()));
+ CHECK_EQ(partition_status::PS_PRIMARY, status());
mu->_tracer->set_description("primary");
ADD_POINT(mu->_tracer);
@@ -230,10 +228,7 @@ void replica::init_prepare(mutation_ptr &mu, bool reconciliation, bool pop_all_c
goto ErrOut;
}
- dassert(mu->data.header.decree > last_committed_decree(),
- "%" PRId64 " VS %" PRId64 "",
- mu->data.header.decree,
- last_committed_decree());
+ CHECK_GT(mu->data.header.decree, last_committed_decree());
// local prepare
err = _prepare_list->prepare(mu, partition_status::PS_PRIMARY, pop_all_committed_mutations);
@@ -276,10 +271,8 @@ void replica::init_prepare(mutation_ptr &mu, bool reconciliation, bool pop_all_c
if (mu->is_logged()) {
do_possible_commit_on_primary(mu);
} else {
- dassert(mu->data.header.log_offset == invalid_offset,
- "invalid log offset, offset = %" PRId64,
- mu->data.header.log_offset);
- dassert(mu->log_task() == nullptr, "");
+ CHECK_EQ(mu->data.header.log_offset, invalid_offset);
+ CHECK(mu->log_task() == nullptr, "");
int64_t pending_size;
mu->log_task() = _private_log->append(mu,
LPC_WRITE_REPLICATION_LOG,
@@ -362,13 +355,8 @@ void replica::send_prepare_message(::dsn::rpc_address addr,
void replica::do_possible_commit_on_primary(mutation_ptr &mu)
{
- dassert(_config.ballot == mu->data.header.ballot,
- "invalid mutation ballot, %" PRId64 " VS %" PRId64 "",
- _config.ballot,
- mu->data.header.ballot);
- dassert(partition_status::PS_PRIMARY == status(),
- "invalid partition_status, status = %s",
- enum_to_string(status()));
+ CHECK_EQ(_config.ballot, mu->data.header.ballot);
+ CHECK_EQ(partition_status::PS_PRIMARY, status());
if (mu->is_ready_for_commit()) {
_prepare_list->commit(mu->data.header.decree, COMMIT_ALL_READY);
@@ -400,16 +388,8 @@ void replica::on_prepare(dsn::message_ex *request)
mu->_tracer->set_description("secondary");
ADD_POINT(mu->_tracer);
- dassert(mu->data.header.pid == rconfig.pid,
- "(%d.%d) VS (%d.%d)",
- mu->data.header.pid.get_app_id(),
- mu->data.header.pid.get_partition_index(),
- rconfig.pid.get_app_id(),
- rconfig.pid.get_partition_index());
- dassert(mu->data.header.ballot == rconfig.ballot,
- "invalid mutation ballot, %" PRId64 " VS %" PRId64 "",
- mu->data.header.ballot,
- rconfig.ballot);
+ CHECK_EQ(mu->data.header.pid, rconfig.pid);
+ CHECK_EQ(mu->data.header.ballot, rconfig.ballot);
if (mu->data.header.ballot < get_ballot()) {
LOG_ERROR("%s: mutation %s on_prepare skipped due to old view", name(), mu->name());
@@ -477,10 +457,7 @@ void replica::on_prepare(dsn::message_ex *request)
}
}
- dassert(rconfig.status == status(),
- "invalid status, %s VS %s",
- enum_to_string(rconfig.status),
- enum_to_string(status()));
+ CHECK_EQ(rconfig.status, status());
if (decree <= last_committed_decree()) {
ack_prepare_message(ERR_OK, mu);
return;
@@ -501,7 +478,7 @@ void replica::on_prepare(dsn::message_ex *request)
}
error_code err = _prepare_list->prepare(mu, status(), pop_all_committed_mutations);
- dassert(err == ERR_OK, "prepare mutation failed, err = %s", err.to_string());
+ CHECK_EQ_MSG(err, ERR_OK, "prepare mutation failed");
if (partition_status::PS_POTENTIAL_SECONDARY == status() ||
partition_status::PS_SECONDARY == status()) {
@@ -525,7 +502,7 @@ void replica::on_prepare(dsn::message_ex *request)
_split_mgr->copy_mutation(mu);
}
- dassert(mu->log_task() == nullptr, "");
+ CHECK(mu->log_task() == nullptr, "");
mu->log_task() = _private_log->append(mu,
LPC_WRITE_REPLICATION_LOG,
&_tracker,
@@ -614,11 +591,7 @@ void replica::on_prepare_reply(std::pair<mutation_ptr, partition_status::type> p
mu->get_decree() <= last_committed_decree())
return;
- dassert(mu->data.header.ballot == get_ballot(),
- "%s: invalid mutation ballot, %" PRId64 " VS %" PRId64 "",
- mu->name(),
- mu->data.header.ballot,
- get_ballot());
+ CHECK_EQ_MSG(mu->data.header.ballot, get_ballot(), "{}: invalid mutation ballot", mu->name());
::dsn::rpc_address node = request->to_address;
partition_status::type st = _primary_states.get_node_status(node);
@@ -659,29 +632,21 @@ void replica::on_prepare_reply(std::pair<mutation_ptr, partition_status::type> p
}
if (resp.err == ERR_OK) {
- dassert(resp.ballot == get_ballot(),
- "invalid response ballot, %" PRId64 " VS %" PRId64 "",
- resp.ballot,
- get_ballot());
- dassert(resp.decree == mu->data.header.decree,
- "invalid response decree, %" PRId64 " VS %" PRId64 "",
- resp.decree,
- mu->data.header.decree);
+ CHECK_EQ(resp.ballot, get_ballot());
+ CHECK_EQ(resp.decree, mu->data.header.decree);
switch (target_status) {
case partition_status::PS_SECONDARY:
dassert(_primary_states.check_exist(node, partition_status::PS_SECONDARY),
"invalid secondary node address, address = %s",
node.to_string());
- dassert(mu->left_secondary_ack_count() > 0, "%u", mu->left_secondary_ack_count());
+ CHECK_GT(mu->left_secondary_ack_count(), 0);
if (0 == mu->decrease_left_secondary_ack_count()) {
do_possible_commit_on_primary(mu);
}
break;
case partition_status::PS_POTENTIAL_SECONDARY:
- dassert(mu->left_potential_secondary_ack_count() > 0,
- "%u",
- mu->left_potential_secondary_ack_count());
+ CHECK_GT(mu->left_potential_secondary_ack_count(), 0);
if (0 == mu->decrease_left_potential_secondary_ack_count()) {
do_possible_commit_on_primary(mu);
}
@@ -757,9 +722,7 @@ void replica::on_prepare_reply(std::pair<mutation_ptr, partition_status::type> p
// note targetStatus and (curent) status may diff
if (target_status == partition_status::PS_POTENTIAL_SECONDARY) {
- dassert(mu->left_potential_secondary_ack_count() > 0,
- "%u",
- mu->left_potential_secondary_ack_count());
+ CHECK_GT(mu->left_potential_secondary_ack_count(), 0);
if (0 == mu->decrease_left_potential_secondary_ack_count()) {
do_possible_commit_on_primary(mu);
}
diff --git a/src/replica/replica_backup.cpp b/src/replica/replica_backup.cpp
index 451df4387..2a999468e 100644
--- a/src/replica/replica_backup.cpp
+++ b/src/replica/replica_backup.cpp
@@ -535,10 +535,7 @@ void replica::trigger_async_checkpoint_for_backup(cold_backup_context_ptr backup
backup_context->checkpoint_decree = last_committed_decree();
} else { // backup_context->durable_decree_when_checkpoint != durable_decree
// checkpoint generated, but is behind checkpoint_decree, need trigger again
- dassert(backup_context->durable_decree_when_checkpoint < durable_decree,
- "durable_decree_when_checkpoint(%" PRId64 ") < durable_decree(%" PRId64 ")",
- backup_context->durable_decree_when_checkpoint,
- durable_decree);
+ CHECK_LT(backup_context->durable_decree_when_checkpoint, durable_decree);
LOG_INFO("%s: need trigger async checkpoint again", backup_context->name);
}
backup_context->checkpoint_timestamp = dsn_now_ms();
@@ -651,10 +648,7 @@ void replica::local_create_backup_checkpoint(cold_backup_context_ptr backup_cont
0,
std::chrono::seconds(10));
} else {
- dassert(last_decree >= backup_context->checkpoint_decree,
- "%" PRId64 " VS %" PRId64 "",
- last_decree,
- backup_context->checkpoint_decree);
+ CHECK_GE(last_decree, backup_context->checkpoint_decree);
backup_context->checkpoint_decree = last_decree; // update to real decree
std::string backup_checkpoint_dir_path = utils::filesystem::path_combine(
_app->backup_dir(),
diff --git a/src/replica/replica_check.cpp b/src/replica/replica_check.cpp
index 20068e147..82d51296a 100644
--- a/src/replica/replica_check.cpp
+++ b/src/replica/replica_check.cpp
@@ -60,7 +60,7 @@ void replica::init_group_check()
if (partition_status::PS_PRIMARY != status() || _options->group_check_disabled)
return;
- dassert(nullptr == _primary_states.group_check_task, "");
+ CHECK(nullptr == _primary_states.group_check_task, "");
_primary_states.group_check_task =
tasking::enqueue_timer(LPC_GROUP_CHECK,
&_tracker,
@@ -223,7 +223,7 @@ void replica::on_group_check_reply(error_code err,
}
auto r = _primary_states.group_check_pending_replies.erase(req->node);
- dassert(r == 1, "invalid node address, address = %s", req->node.to_string());
+ CHECK_EQ_MSG(r, 1, "invalid node address, address = {}", req->node);
if (err != ERR_OK || resp->err != ERR_OK) {
if (ERR_OK == err) {
diff --git a/src/replica/replica_config.cpp b/src/replica/replica_config.cpp
index 58ff311a3..1f6ae9197 100644
--- a/src/replica/replica_config.cpp
+++ b/src/replica/replica_config.cpp
@@ -112,16 +112,13 @@ void replica::on_config_proposal(configuration_update_request &proposal)
remove(proposal);
break;
default:
- dassert(false, "invalid config_type, type = %s", enum_to_string(proposal.type));
+ CHECK(false, "invalid config_type, type = {}", enum_to_string(proposal.type));
}
}
void replica::assign_primary(configuration_update_request &proposal)
{
- dassert(proposal.node == _stub->_primary_address,
- "%s VS %s",
- proposal.node.to_string(),
- _stub->_primary_address_str);
+ CHECK_EQ(proposal.node, _stub->_primary_address);
if (status() == partition_status::PS_PRIMARY) {
LOG_WARNING("%s: invalid assgin primary proposal as the node is in %s",
@@ -158,30 +155,16 @@ void replica::add_potential_secondary(configuration_update_request &proposal)
return;
}
- dassert(proposal.config.ballot == get_ballot(),
- "invalid ballot, %" PRId64 " VS %" PRId64 "",
- proposal.config.ballot,
- get_ballot());
- dassert(proposal.config.pid == _primary_states.membership.pid,
- "(%d.%d) VS (%d.%d)",
- proposal.config.pid.get_app_id(),
- proposal.config.pid.get_partition_index(),
- _primary_states.membership.pid.get_app_id(),
- _primary_states.membership.pid.get_partition_index());
- dassert(proposal.config.primary == _primary_states.membership.primary,
- "%s VS %s",
- proposal.config.primary.to_string(),
- _primary_states.membership.primary.to_string());
- dassert(proposal.config.secondaries == _primary_states.membership.secondaries,
- "count(%d) VS count(%d)",
- (int)proposal.config.secondaries.size(),
- (int)_primary_states.membership.secondaries.size());
- dassert(!_primary_states.check_exist(proposal.node, partition_status::PS_PRIMARY),
- "node = %s",
- proposal.node.to_string());
- dassert(!_primary_states.check_exist(proposal.node, partition_status::PS_SECONDARY),
- "node = %s",
- proposal.node.to_string());
+ CHECK_EQ(proposal.config.ballot, get_ballot());
+ CHECK_EQ(proposal.config.pid, _primary_states.membership.pid);
+ CHECK_EQ(proposal.config.primary, _primary_states.membership.primary);
+ CHECK(proposal.config.secondaries == _primary_states.membership.secondaries, "");
+ CHECK(!_primary_states.check_exist(proposal.node, partition_status::PS_PRIMARY),
+ "node = {}",
+ proposal.node);
+ CHECK(!_primary_states.check_exist(proposal.node, partition_status::PS_SECONDARY),
+ "node = {}",
+ proposal.node);
int potential_secondaries_count =
_primary_states.membership.secondaries.size() + _primary_states.learners.size();
@@ -205,7 +188,7 @@ void replica::add_potential_secondary(configuration_update_request &proposal)
proposal.node.to_string());
}
} else {
- dassert(false, "invalid config_type, type = %s", enum_to_string(proposal.type));
+ CHECK(false, "invalid config_type, type = {}", enum_to_string(proposal.type));
}
}
@@ -255,21 +238,10 @@ void replica::downgrade_to_secondary_on_primary(configuration_update_request &pr
if (proposal.config.ballot != get_ballot() || status() != partition_status::PS_PRIMARY)
return;
- dassert(proposal.config.pid == _primary_states.membership.pid,
- "(%d.%d) VS (%d.%d)",
- proposal.config.pid.get_app_id(),
- proposal.config.pid.get_partition_index(),
- _primary_states.membership.pid.get_app_id(),
- _primary_states.membership.pid.get_partition_index());
- dassert(proposal.config.primary == _primary_states.membership.primary,
- "%s VS %s",
- proposal.config.primary.to_string(),
- _primary_states.membership.primary.to_string());
- dassert(proposal.config.secondaries == _primary_states.membership.secondaries, "");
- dassert(proposal.node == proposal.config.primary,
- "%s VS %s",
- proposal.node.to_string(),
- proposal.config.primary.to_string());
+ CHECK_EQ(proposal.config.pid, _primary_states.membership.pid);
+ CHECK_EQ(proposal.config.primary, _primary_states.membership.primary);
+ CHECK(proposal.config.secondaries == _primary_states.membership.secondaries, "");
+ CHECK_EQ(proposal.node, proposal.config.primary);
proposal.config.primary.set_invalid();
proposal.config.secondaries.push_back(proposal.node);
@@ -283,23 +255,16 @@ void replica::downgrade_to_inactive_on_primary(configuration_update_request &pro
if (proposal.config.ballot != get_ballot() || status() != partition_status::PS_PRIMARY)
return;
- dassert(proposal.config.pid == _primary_states.membership.pid,
- "(%d.%d) VS (%d.%d)",
- proposal.config.pid.get_app_id(),
- proposal.config.pid.get_partition_index(),
- _primary_states.membership.pid.get_app_id(),
- _primary_states.membership.pid.get_partition_index());
- dassert(proposal.config.primary == _primary_states.membership.primary,
- "%s VS %s",
- proposal.config.primary.to_string(),
- _primary_states.membership.primary.to_string());
- dassert(proposal.config.secondaries == _primary_states.membership.secondaries, "");
+ CHECK_EQ(proposal.config.pid, _primary_states.membership.pid);
+ CHECK_EQ(proposal.config.primary, _primary_states.membership.primary);
+ CHECK(proposal.config.secondaries == _primary_states.membership.secondaries, "");
if (proposal.node == proposal.config.primary) {
proposal.config.primary.set_invalid();
} else {
- auto rt = replica_helper::remove_node(proposal.node, proposal.config.secondaries);
- dassert(rt, "remove node failed, node = %s", proposal.node.to_string());
+ CHECK(replica_helper::remove_node(proposal.node, proposal.config.secondaries),
+ "remove node failed, node = {}",
+ proposal.node);
}
update_configuration_on_meta_server(
@@ -311,31 +276,21 @@ void replica::remove(configuration_update_request &proposal)
if (proposal.config.ballot != get_ballot() || status() != partition_status::PS_PRIMARY)
return;
- dassert(proposal.config.pid == _primary_states.membership.pid,
- "(%d.%d) VS (%d.%d)",
- proposal.config.pid.get_app_id(),
- proposal.config.pid.get_partition_index(),
- _primary_states.membership.pid.get_app_id(),
- _primary_states.membership.pid.get_partition_index());
- dassert(proposal.config.primary == _primary_states.membership.primary,
- "%s VS %s",
- proposal.config.primary.to_string(),
- _primary_states.membership.primary.to_string());
- dassert(proposal.config.secondaries == _primary_states.membership.secondaries, "");
+ CHECK_EQ(proposal.config.pid, _primary_states.membership.pid);
+ CHECK_EQ(proposal.config.primary, _primary_states.membership.primary);
+ CHECK(proposal.config.secondaries == _primary_states.membership.secondaries, "");
auto st = _primary_states.get_node_status(proposal.node);
switch (st) {
case partition_status::PS_PRIMARY:
- dassert(proposal.config.primary == proposal.node,
- "%s VS %s",
- proposal.config.primary.to_string(),
- proposal.node.to_string());
+ CHECK_EQ(proposal.config.primary, proposal.node);
proposal.config.primary.set_invalid();
break;
case partition_status::PS_SECONDARY: {
- auto rt = replica_helper::remove_node(proposal.node, proposal.config.secondaries);
- dassert(rt, "remove_node failed, node = %s", proposal.node.to_string());
+ CHECK(replica_helper::remove_node(proposal.node, proposal.config.secondaries),
+ "remove_node failed, node = {}",
+ proposal.node);
} break;
case partition_status::PS_POTENTIAL_SECONDARY:
break;
@@ -370,9 +325,7 @@ void replica::on_remove(const replica_configuration &request)
return;
}
- dassert(request.status == partition_status::PS_INACTIVE,
- "invalid partition_status, status = %s",
- enum_to_string(request.status));
+ CHECK_EQ(request.status, partition_status::PS_INACTIVE);
update_local_configuration(request);
}
@@ -390,20 +343,14 @@ void replica::update_configuration_on_meta_server(config_type::type type,
newConfig.last_committed_decree = last_committed_decree();
if (type == config_type::CT_PRIMARY_FORCE_UPDATE_BALLOT) {
- dassert(status() == partition_status::PS_INACTIVE && _inactive_is_transient &&
- _is_initializing,
- "");
- dassert(
- newConfig.primary == node, "%s VS %s", newConfig.primary.to_string(), node.to_string());
+ CHECK(status() == partition_status::PS_INACTIVE && _inactive_is_transient &&
+ _is_initializing,
+ "");
+ CHECK_EQ(newConfig.primary, node);
} else if (type != config_type::CT_ASSIGN_PRIMARY &&
type != config_type::CT_UPGRADE_TO_PRIMARY) {
- dassert(status() == partition_status::PS_PRIMARY,
- "partition status must be primary, status = %s",
- enum_to_string(status()));
- dassert(newConfig.ballot == _primary_states.membership.ballot,
- "invalid ballot, %" PRId64 " VS %" PRId64 "",
- newConfig.ballot,
- _primary_states.membership.ballot);
+ CHECK_EQ(status(), partition_status::PS_PRIMARY);
+ CHECK_EQ(newConfig.ballot, _primary_states.membership.ballot);
}
// disable 2pc during reconfiguration
@@ -514,17 +461,9 @@ void replica::on_update_configuration_on_meta_server_reply(
// post-update work items?
if (resp.err == ERR_OK) {
- dassert(req->config.pid == resp.config.pid,
- "(%d.%d) VS (%d.%d)",
- req->config.pid.get_app_id(),
- req->config.pid.get_partition_index(),
- resp.config.pid.get_app_id(),
- resp.config.pid.get_partition_index());
- dassert(req->config.primary == resp.config.primary,
- "%s VS %s",
- req->config.primary.to_string(),
- resp.config.primary.to_string());
- dassert(req->config.secondaries == resp.config.secondaries, "");
+ CHECK_EQ(req->config.pid, resp.config.pid);
+ CHECK_EQ(req->config.primary, resp.config.primary);
+ CHECK(req->config.secondaries == resp.config.secondaries, "");
switch (req->type) {
case config_type::CT_UPGRADE_TO_PRIMARY:
@@ -546,11 +485,11 @@ void replica::on_update_configuration_on_meta_server_reply(
}
break;
case config_type::CT_PRIMARY_FORCE_UPDATE_BALLOT:
- dassert(_is_initializing, "");
+ CHECK(_is_initializing, "");
_is_initializing = false;
break;
default:
- dassert(false, "invalid config_type, type = %s", enum_to_string(req->type));
+ CHECK(false, "invalid config_type, type = {}", enum_to_string(req->type));
}
}
@@ -652,10 +591,7 @@ void replica::query_app_envs(/*out*/ std::map<std::string, std::string> &envs)
bool replica::update_configuration(const partition_configuration &config)
{
- dassert(config.ballot >= get_ballot(),
- "invalid ballot, %" PRId64 " VS %" PRId64 "",
- config.ballot,
- get_ballot());
+ CHECK_GE(config.ballot, get_ballot());
replica_configuration rconfig;
replica_helper::get_replica_config(config, _stub->_primary_address, rconfig);
@@ -706,16 +642,11 @@ bool replica::update_local_configuration(const replica_configuration &config,
return true;
});
- dassert(config.ballot > get_ballot() || (same_ballot && config.ballot == get_ballot()),
- "invalid ballot, %" PRId64 " VS %" PRId64 "",
- config.ballot,
- get_ballot());
- dassert(config.pid == get_gpid(),
- "(%d.%d) VS (%d.%d)",
- config.pid.get_app_id(),
- config.pid.get_partition_index(),
- get_gpid().get_app_id(),
- get_gpid().get_partition_index());
+ CHECK(config.ballot > get_ballot() || (same_ballot && config.ballot == get_ballot()),
+ "invalid ballot, {} VS {}",
+ config.ballot,
+ get_ballot());
+ CHECK_EQ(config.pid, get_gpid());
partition_status::type old_status = status();
ballot old_ballot = get_ballot();
@@ -827,10 +758,7 @@ bool replica::update_local_configuration(const replica_configuration &config,
_split_mgr->parent_cleanup_split_context();
}
_last_config_change_time_ms = dsn_now_ms();
- dassert(max_prepared_decree() >= last_committed_decree(),
- "%" PRId64 " VS %" PRId64 "",
- max_prepared_decree(),
- last_committed_decree());
+ CHECK_GE(max_prepared_decree(), last_committed_decree());
_bulk_loader->clear_bulk_load_states_if_needed(old_status, config.status);
@@ -877,10 +805,10 @@ bool replica::update_local_configuration(const replica_configuration &config,
clear_cold_backup_state();
break;
case partition_status::PS_POTENTIAL_SECONDARY:
- dassert(false, "invalid execution path");
+ CHECK(false, "invalid execution path");
break;
default:
- dassert(false, "invalid execution path");
+ CHECK(false, "invalid execution path");
}
break;
case partition_status::PS_SECONDARY:
@@ -913,20 +841,20 @@ bool replica::update_local_configuration(const replica_configuration &config,
// _secondary_states.cleanup(true); => do it in close as it may block
break;
default:
- dassert(false, "invalid execution path");
+ CHECK(false, "invalid execution path");
}
break;
case partition_status::PS_POTENTIAL_SECONDARY:
switch (config.status) {
case partition_status::PS_PRIMARY:
- dassert(false, "invalid execution path");
+ CHECK(false, "invalid execution path");
break;
case partition_status::PS_SECONDARY:
_prepare_list->truncate(_app->last_committed_decree());
// using force cleanup now as all tasks must be done already
r = _potential_secondary_states.cleanup(true);
- dassert(r, "%s: potential secondary context cleanup failed", name());
+ CHECK(r, "{}: potential secondary context cleanup failed", name());
check_state_completeness();
break;
@@ -939,10 +867,10 @@ bool replica::update_local_configuration(const replica_configuration &config,
_potential_secondary_states.cleanup(false);
// => do this in close as it may block
// r = _potential_secondary_states.cleanup(true);
- // dassert(r, "%s: potential secondary context cleanup failed", name());
+ // CHECK(r, "{}: potential secondary context cleanup failed", name());
break;
default:
- dassert(false, "invalid execution path");
+ CHECK(false, "invalid execution path");
}
break;
case partition_status::PS_PARTITION_SPLIT:
@@ -956,7 +884,7 @@ bool replica::update_local_configuration(const replica_configuration &config,
_split_states.cleanup(true);
break;
case partition_status::PS_POTENTIAL_SECONDARY:
- dassert(false, "invalid execution path");
+ CHECK(false, "invalid execution path");
break;
case partition_status::PS_INACTIVE:
break;
@@ -964,7 +892,7 @@ bool replica::update_local_configuration(const replica_configuration &config,
_split_states.cleanup(false);
break;
default:
- dassert(false, "invalid execution path");
+ CHECK(false, "invalid execution path");
}
break;
case partition_status::PS_INACTIVE:
@@ -975,13 +903,13 @@ bool replica::update_local_configuration(const replica_configuration &config,
}
switch (config.status) {
case partition_status::PS_PRIMARY:
- dassert(_inactive_is_transient, "must be in transient state for being primary next");
+ CHECK(_inactive_is_transient, "must be in transient state for being primary next");
_inactive_is_transient = false;
init_group_check();
replay_prepare_list();
break;
case partition_status::PS_SECONDARY:
- dassert(_inactive_is_transient, "must be in transient state for being secondary next");
+ CHECK(_inactive_is_transient, "must be in transient state for being secondary next");
_inactive_is_transient = false;
break;
case partition_status::PS_POTENTIAL_SECONDARY:
@@ -1003,31 +931,31 @@ bool replica::update_local_configuration(const replica_configuration &config,
_inactive_is_transient = false;
break;
default:
- dassert(false, "invalid execution path");
+ CHECK(false, "invalid execution path");
}
break;
case partition_status::PS_ERROR:
switch (config.status) {
case partition_status::PS_PRIMARY:
- dassert(false, "invalid execution path");
+ CHECK(false, "invalid execution path");
break;
case partition_status::PS_SECONDARY:
- dassert(false, "invalid execution path");
+ CHECK(false, "invalid execution path");
break;
case partition_status::PS_POTENTIAL_SECONDARY:
- dassert(false, "invalid execution path");
+ CHECK(false, "invalid execution path");
break;
case partition_status::PS_INACTIVE:
- dassert(false, "invalid execution path");
+ CHECK(false, "invalid execution path");
break;
case partition_status::PS_ERROR:
break;
default:
- dassert(false, "invalid execution path");
+ CHECK(false, "invalid execution path");
}
break;
default:
- dassert(false, "invalid execution path");
+ CHECK(false, "invalid execution path");
}
LOG_INFO("%s: status change %s @ %" PRId64 " => %s @ %" PRId64 ", pre(%" PRId64 ", %" PRId64
diff --git a/src/replica/replica_failover.cpp b/src/replica/replica_failover.cpp
index 7ea95a108..9d3852fc2 100644
--- a/src/replica/replica_failover.cpp
+++ b/src/replica/replica_failover.cpp
@@ -67,9 +67,7 @@ void replica::handle_remote_failure(partition_status::type st,
enum_to_string(st),
node.to_string());
- dassert(status() == partition_status::PS_PRIMARY,
- "invalid partition_status, status = %s",
- enum_to_string(status()));
+ CHECK_EQ(status(), partition_status::PS_PRIMARY);
dassert(
node != _stub->_primary_address, "%s VS %s", node.to_string(), _stub->_primary_address_str);
diff --git a/src/replica/replica_init.cpp b/src/replica/replica_init.cpp
index 1d90786a7..6ee6626cc 100644
--- a/src/replica/replica_init.cpp
+++ b/src/replica/replica_init.cpp
@@ -218,12 +218,12 @@ decree replica::get_replay_start_decree()
error_code replica::init_app_and_prepare_list(bool create_new)
{
- dassert(nullptr == _app, "");
+ CHECK(nullptr == _app, "");
error_code err;
std::string log_dir = utils::filesystem::path_combine(dir(), "plog");
_app.reset(replication_app_base::new_storage_instance(_app_info.app_type, this));
- dassert(nullptr == _private_log, "private log must not be initialized yet");
+ CHECK(nullptr == _private_log, "private log must not be initialized yet");
if (create_new) {
err = _app->open_new_internal(this, _stub->_log->on_partition_reset(get_gpid(), 0), 0);
@@ -240,10 +240,7 @@ error_code replica::init_app_and_prepare_list(bool create_new)
} else {
err = _app->open_internal(this);
if (err == ERR_OK) {
- dassert(_app->last_committed_decree() == _app->last_durable_decree(),
- "invalid app state, %" PRId64 " VS %" PRId64 "",
- _app->last_committed_decree(),
- _app->last_durable_decree());
+ CHECK_EQ(_app->last_committed_decree(), _app->last_durable_decree());
_config.ballot = _app->init_info().init_ballot;
_prepare_list->reset(_app->last_committed_decree());
diff --git a/src/replica/replica_learn.cpp b/src/replica/replica_learn.cpp
index 4015abb80..a9e31b49b 100644
--- a/src/replica/replica_learn.cpp
+++ b/src/replica/replica_learn.cpp
@@ -408,10 +408,7 @@ void replica::on_learn(dsn::message_ex *msg, const learn_request &request)
}
}
- dassert(request.last_committed_decree_in_app <= local_committed_decree,
- "%" PRId64 " VS %" PRId64 "",
- request.last_committed_decree_in_app,
- local_committed_decree);
+ CHECK_LE(request.last_committed_decree_in_app, local_committed_decree);
const decree learn_start_decree = get_learn_start_decree(request);
response.state.__set_learn_start_decree(learn_start_decree);
@@ -558,13 +555,8 @@ void replica::on_learn_reply(error_code err, learn_request &&req, learn_response
{
_checker.only_one_thread_access();
- dassert(partition_status::PS_POTENTIAL_SECONDARY == status(),
- "invalid partition status, status = %s",
- enum_to_string(status()));
- dassert(req.signature == (int64_t)_potential_secondary_states.learning_version,
- "invalid learn signature, %" PRId64 " VS %" PRId64 "",
- req.signature,
- (int64_t)_potential_secondary_states.learning_version);
+ CHECK_EQ(partition_status::PS_POTENTIAL_SECONDARY, status());
+ CHECK_EQ(req.signature, _potential_secondary_states.learning_version);
if (err != ERR_OK) {
handle_learning_error(err, false);
@@ -697,8 +689,8 @@ void replica::on_learn_reply(error_code err, learn_request &&req, learn_response
}
if (err == ERR_OK) {
- dassert(_app->last_committed_decree() == 0, "must be zero after app::open(true)");
- dassert(_app->last_durable_decree() == 0, "must be zero after app::open(true)");
+ CHECK_EQ_MSG(_app->last_committed_decree(), 0, "must be zero after app::open(true)");
+ CHECK_EQ_MSG(_app->last_durable_decree(), 0, "must be zero after app::open(true)");
// reset prepare list
_prepare_list->reset(0);
@@ -761,14 +753,10 @@ void replica::on_learn_reply(error_code err, learn_request &&req, learn_response
}
if (resp.prepare_start_decree != invalid_decree) {
- dassert(resp.type == learn_type::LT_CACHE,
- "invalid learn_type, type = %s",
- enum_to_string(resp.type));
- dassert(resp.state.files.size() == 0, "");
- dassert(_potential_secondary_states.learning_status ==
- learner_status::LearningWithoutPrepare,
- "invalid learning_status, status = %s",
- enum_to_string(_potential_secondary_states.learning_status));
+ CHECK_EQ(resp.type, learn_type::LT_CACHE);
+ CHECK(resp.state.files.empty(), "");
+ CHECK_EQ(_potential_secondary_states.learning_status,
+ learner_status::LearningWithoutPrepare);
_potential_secondary_states.learning_status = learner_status::LearningWithPrepareTransient;
// reset log positions for later mutations
@@ -859,12 +847,8 @@ void replica::on_learn_reply(error_code err, learn_request &&req, learn_response
// further states are synced using 2pc, and we must commit now as those later 2pc messages
// thinks they should
_prepare_list->commit(resp.prepare_start_decree - 1, COMMIT_TO_DECREE_HARD);
- dassert(_prepare_list->last_committed_decree() == _app->last_committed_decree(),
- "last_committed_decree of prepare_list and app isn't equal, %" PRId64 " VS %" PRId64
- "",
- _prepare_list->last_committed_decree(),
- _app->last_committed_decree());
- dassert(resp.state.files.size() == 0, "");
+ CHECK_EQ(_prepare_list->last_committed_decree(), _app->last_committed_decree());
+ CHECK(resp.state.files.empty(), "");
// all state is complete
dassert(_app->last_committed_decree() + 1 >=
@@ -1078,13 +1062,11 @@ void replica::on_copy_remote_state_completed(error_code err,
if (err != ERR_OK) {
// do nothing
} else if (_potential_secondary_states.learning_status == learner_status::LearningWithPrepare) {
- dassert(resp.type == learn_type::LT_CACHE,
- "invalid learn_type, type = %s",
- enum_to_string(resp.type));
+ CHECK_EQ(resp.type, learn_type::LT_CACHE);
} else {
- dassert(resp.type == learn_type::LT_APP || resp.type == learn_type::LT_LOG,
- "invalid learn_type, type = %s",
- enum_to_string(resp.type));
+ CHECK(resp.type == learn_type::LT_APP || resp.type == learn_type::LT_LOG,
+ "invalid learn_type, type = {}",
+ enum_to_string(resp.type));
learn_state lstate;
lstate.from_decree_excluded = resp.state.from_decree_excluded;
@@ -1105,17 +1087,11 @@ void replica::on_copy_remote_state_completed(error_code err,
err = _app->apply_checkpoint(replication_app_base::chkpt_apply_mode::learn, lstate);
if (err == ERR_OK) {
- dassert(_app->last_committed_decree() >= _app->last_durable_decree(),
- "invalid app state, %" PRId64 " VS %" PRId64 "",
- _app->last_committed_decree(),
- _app->last_durable_decree());
+ CHECK_GE(_app->last_committed_decree(), _app->last_durable_decree());
// because if the original _app->last_committed_decree > resp.last_committed_decree,
// the learn_start_decree will be set to 0, which makes learner to learn from
// scratch
- dassert(_app->last_committed_decree() <= resp.last_committed_decree,
- "invalid app state, %" PRId64 " VS %" PRId64 "",
- _app->last_committed_decree(),
- resp.last_committed_decree);
+ CHECK_LE(_app->last_committed_decree(), resp.last_committed_decree);
LOG_INFO("%s: on_copy_remote_state_completed[%016" PRIx64
"]: learnee = %s, learn_duration = %" PRIu64 " ms, "
"checkpoint duration = %" PRIu64
@@ -1219,10 +1195,7 @@ void replica::on_copy_remote_state_completed(error_code err,
_app->last_durable_decree());
if (err == ERR_OK) {
- dassert(_app->last_committed_decree() == _app->last_durable_decree(),
- "%" PRId64 " VS %" PRId64 "",
- _app->last_committed_decree(),
- _app->last_durable_decree());
+ CHECK_EQ(_app->last_committed_decree(), _app->last_durable_decree());
}
}
@@ -1420,16 +1393,9 @@ void replica::on_learn_completion_notification_reply(error_code err,
{
_checker.only_one_thread_access();
- dassert(partition_status::PS_POTENTIAL_SECONDARY == status(),
- "invalid partition_status, status = %s",
- enum_to_string(status()));
- dassert(_potential_secondary_states.learning_status == learner_status::LearningSucceeded,
- "invalid learner_status, status = %s",
- enum_to_string(_potential_secondary_states.learning_status));
- dassert(report.learner_signature == (int64_t)_potential_secondary_states.learning_version,
- "%" PRId64 " VS %" PRId64 "",
- report.learner_signature,
- (int64_t)_potential_secondary_states.learning_version);
+ CHECK_EQ(partition_status::PS_POTENTIAL_SECONDARY, status());
+ CHECK_EQ(_potential_secondary_states.learning_status, learner_status::LearningSucceeded);
+ CHECK_EQ(report.learner_signature, _potential_secondary_states.learning_version);
if (err != ERR_OK) {
handle_learning_error(err, false);
diff --git a/src/replica/replica_restore.cpp b/src/replica/replica_restore.cpp
index 5f4d0af2c..9fd8066fb 100644
--- a/src/replica/replica_restore.cpp
+++ b/src/replica/replica_restore.cpp
@@ -93,12 +93,13 @@ bool replica::read_cold_backup_metadata(const std::string &file,
return false;
}
fin.read(buf.get(), file_sz);
- dassert(file_sz == fin.gcount(),
- "%s: read file(%s) failed, need %" PRId64 ", but read %" PRId64 "",
- name(),
- file.c_str(),
- file_sz,
- fin.gcount());
+ CHECK_EQ_MSG(file_sz,
+ fin.gcount(),
+ "{}: read file({}) failed, need {}, but read {}",
+ name(),
+ file,
+ file_sz,
+ fin.gcount());
fin.close();
buf.get()[fin.gcount()] = '\0';
diff --git a/src/replica/replica_stub.cpp b/src/replica/replica_stub.cpp
index 6952b4549..6d7395c0f 100644
--- a/src/replica/replica_stub.cpp
+++ b/src/replica/replica_stub.cpp
@@ -569,13 +569,10 @@ void replica_stub::initialize(const replication_options &opts, bool clear /* = f
r->last_prepared_decree());
utils::auto_lock<utils::ex_lock> l(rps_lock);
-
- if (rps.find(r->get_gpid()) != rps.end()) {
- dassert(false,
- "conflict replica dir: %s <--> %s",
- r->dir().c_str(),
- rps[r->get_gpid()]->dir().c_str());
- }
+ CHECK(rps.find(r->get_gpid()) == rps.end(),
+ "conflict replica dir: {} <--> {}",
+ r->dir(),
+ rps[r->get_gpid()]->dir());
rps[r->get_gpid()] = r;
}
@@ -637,8 +634,10 @@ void replica_stub::initialize(const replication_options &opts, bool clear /* = f
const char *dir = it->second->dir().c_str();
char rename_dir[1024];
sprintf(rename_dir, "%s.%" PRIu64 ".err", dir, dsn_now_us());
- bool ret = dsn::utils::filesystem::rename_path(dir, rename_dir);
- dassert(ret, "init_replica: failed to move directory '%s' to '%s'", dir, rename_dir);
+ CHECK(dsn::utils::filesystem::rename_path(dir, rename_dir),
+ "init_replica: failed to move directory '{}' to '{}'",
+ dir,
+ rename_dir);
LOG_WARNING("init_replica: {replica_dir_op} succeed to move directory '%s' to '%s'",
dir,
rename_dir);
@@ -649,21 +648,21 @@ void replica_stub::initialize(const replication_options &opts, bool clear /* = f
// restart log service
_log->close();
_log = nullptr;
- if (!utils::filesystem::remove_path(_options.slog_dir)) {
- dassert(false, "remove directory %s failed", _options.slog_dir.c_str());
- }
+ CHECK(utils::filesystem::remove_path(_options.slog_dir),
+ "remove directory {} failed",
+ _options.slog_dir);
_log = new mutation_log_shared(_options.slog_dir,
_options.log_shared_file_size_mb,
_options.log_shared_force_flush,
&_counter_shared_log_recent_write_size);
- auto lerr = _log->open(nullptr, [this](error_code err) { this->handle_log_failure(err); });
- dassert(lerr == ERR_OK, "restart log service must succeed");
+ CHECK_EQ_MSG(_log->open(nullptr, [this](error_code err) { this->handle_log_failure(err); }),
+ ERR_OK,
+ "restart log service failed");
}
bool is_log_complete = true;
for (auto it = rps.begin(); it != rps.end(); ++it) {
- auto err = it->second->background_sync_checkpoint();
- dassert(err == ERR_OK, "sync checkpoint failed, err = %s", err.to_string());
+ CHECK_EQ_MSG(it->second->background_sync_checkpoint(), ERR_OK, "sync checkpoint failed");
it->second->reset_prepare_list_after_replay();
@@ -780,10 +779,11 @@ void replica_stub::initialize_fs_manager(std::vector<std::string> &data_dirs,
count++;
}
- dassert_f(available_dirs.size() > 0,
- "initialize fs manager failed, no available data directory");
- error_code err = _fs_manager.initialize(available_dirs, available_dir_tags, false);
- dassert_f(err == dsn::ERR_OK, "initialize fs manager failed, err({})", err);
+ CHECK_GT_MSG(
+ available_dirs.size(), 0, "initialize fs manager failed, no available data directory");
+ CHECK_EQ_MSG(_fs_manager.initialize(available_dirs, available_dir_tags, false),
+ dsn::ERR_OK,
+ "initialize fs manager failed");
}
void replica_stub::initialize_start()
@@ -824,18 +824,19 @@ void replica_stub::initialize_start()
_backup_server = dsn::make_unique<replica_backup_server>(this);
// init liveness monitor
- dassert(NS_Disconnected == _state, "");
+ CHECK_EQ(NS_Disconnected, _state);
if (_options.fd_disabled == false) {
_failure_detector = std::make_shared<dsn::dist::slave_failure_detector_with_multimaster>(
_options.meta_servers,
[this]() { this->on_meta_server_disconnected(); },
[this]() { this->on_meta_server_connected(); });
- auto err = _failure_detector->start(_options.fd_check_interval_seconds,
- _options.fd_beacon_interval_seconds,
- _options.fd_lease_seconds,
- _options.fd_grace_seconds);
- dassert(err == ERR_OK, "FD start failed, err = %s", err.to_string());
+ CHECK_EQ_MSG(_failure_detector->start(_options.fd_check_interval_seconds,
+ _options.fd_beacon_interval_seconds,
+ _options.fd_lease_seconds,
+ _options.fd_grace_seconds),
+ ERR_OK,
+ "FD start failed");
_failure_detector->register_master(_failure_detector->current_server_contact());
} else {
@@ -1488,7 +1489,7 @@ void replica_stub::set_meta_server_connected_for_test(
const configuration_query_by_node_response &resp)
{
zauto_lock l(_state_lock);
- dassert(_state != NS_Connected, "");
+ CHECK_NE(_state, NS_Connected);
_state = NS_Connected;
for (auto it = resp.partitions.begin(); it != resp.partitions.end(); ++it) {
@@ -1657,7 +1658,7 @@ void replica_stub::response_client(gpid id,
void replica_stub::init_gc_for_test()
{
- dassert(_options.gc_disabled, "");
+ CHECK(_options.gc_disabled, "");
_gc_timer_task = tasking::enqueue(LPC_GARBAGE_COLLECT_LOGS_AND_REPLICAS,
&_tracker,
@@ -2055,16 +2056,16 @@ void replica_stub::open_replica(
// NOTICE: if dir a.b.pegasus does not exist, or .app-info does not exist, but the ballot >
// 0, or the last_committed_decree > 0, start replica will fail
if ((configuration_update != nullptr) && (configuration_update->info.is_stateful)) {
- dassert_f(configuration_update->config.ballot == 0 &&
- configuration_update->config.last_committed_decree == 0,
- "{}@{}: cannot load replica({}.{}), ballot = {}, "
- "last_committed_decree = {}, but it does not existed!",
- id.to_string(),
- _primary_address_str,
- id.to_string(),
- app.app_type.c_str(),
- configuration_update->config.ballot,
- configuration_update->config.last_committed_decree);
+ CHECK(configuration_update->config.ballot == 0 &&
+ configuration_update->config.last_committed_decree == 0,
+ "{}@{}: cannot load replica({}.{}), ballot = {}, "
+ "last_committed_decree = {}, but it does not existed!",
+ id.to_string(),
+ _primary_address_str,
+ id.to_string(),
+ app.app_type.c_str(),
+ configuration_update->config.ballot,
+ configuration_update->config.last_committed_decree);
}
// NOTICE: only new_replica_group's assign_primary will execute this; if server restart when
@@ -2090,10 +2091,9 @@ void replica_stub::open_replica(
// replica(if contain valid data, it will execute load-process)
if (!restore_if_necessary && ::dsn::utils::filesystem::directory_exists(dir)) {
- if (!::dsn::utils::filesystem::remove_path(dir)) {
- dassert(false, "remove useless directory(%s) failed", dir.c_str());
- return;
- }
+ CHECK(::dsn::utils::filesystem::remove_path(dir),
+ "remove useless directory({}) failed",
+ dir);
}
rep = replica::newr(this, id, app, restore_if_necessary, is_duplication_follower);
}
@@ -2103,20 +2103,25 @@ void replica_stub::open_replica(
id.to_string(),
_primary_address_str);
zauto_write_lock l(_replicas_lock);
- auto ret = _opening_replicas.erase(id);
- dassert(ret > 0, "replica %s is not in _opening_replicas", id.to_string());
+ CHECK_GT_MSG(_opening_replicas.erase(id),
+ 0,
+ "replica {} is not in _opening_replicas",
+ id.to_string());
_counter_replicas_opening_count->decrement();
return;
}
{
zauto_write_lock l(_replicas_lock);
- auto ret = _opening_replicas.erase(id);
- dassert(ret > 0, "replica %s is not in _opening_replicas", id.to_string());
+ CHECK_GT_MSG(_opening_replicas.erase(id),
+ 0,
+ "replica {} is not in _opening_replicas",
+ id.to_string());
_counter_replicas_opening_count->decrement();
- auto it = _replicas.find(id);
- dassert(it == _replicas.end(), "replica %s is already in _replicas", id.to_string());
+ CHECK(_replicas.find(id) == _replicas.end(),
+ "replica {} is already in _replicas",
+ id.to_string());
_replicas.insert(replicas::value_type(rep->get_gpid(), rep));
_counter_replicas_count->increment();
@@ -2138,14 +2143,14 @@ void replica_stub::open_replica(
task_ptr replica_stub::begin_close_replica(replica_ptr r)
{
- dassert_f(r->status() == partition_status::PS_ERROR ||
- r->status() == partition_status::PS_INACTIVE ||
- r->disk_migrator()->status() >= disk_migration_status::MOVED,
- "invalid state(partition_status={}, migration_status={}) when calling "
- "replica({}) close",
- enum_to_string(r->status()),
- enum_to_string(r->disk_migrator()->status()),
- r->name());
+ CHECK(r->status() == partition_status::PS_ERROR ||
+ r->status() == partition_status::PS_INACTIVE ||
+ r->disk_migrator()->status() >= disk_migration_status::MOVED,
+ "invalid state(partition_status={}, migration_status={}) when calling "
+ "replica({}) close",
+ enum_to_string(r->status()),
+ enum_to_string(r->disk_migrator()->status()),
+ r->name());
gpid id = r->get_gpid();
@@ -2190,9 +2195,7 @@ void replica_stub::close_replica(replica_ptr r)
{
zauto_write_lock l(_replicas_lock);
auto find = _closing_replicas.find(id);
- dassert(find != _closing_replicas.end(),
- "replica %s is not in _closing_replicas",
- name.c_str());
+ CHECK(find != _closing_replicas.end(), "replica {} is not in _closing_replicas", name);
_closed_replicas.emplace(
id, std::make_pair(std::get<2>(find->second), std::get<3>(find->second)));
_closing_replicas.erase(find);
@@ -2224,9 +2227,7 @@ void replica_stub::trigger_checkpoint(replica_ptr r, bool is_emergency)
void replica_stub::handle_log_failure(error_code err)
{
LOG_ERROR("handle log failure: %s", err.to_string());
- if (!s_not_exit_on_log_failure) {
- dassert(false, "TODO: better log failure handling ...");
- }
+ CHECK(s_not_exit_on_log_failure, "TODO: better log failure handling ...");
}
void replica_stub::open_service()
@@ -2685,9 +2686,10 @@ void replica_stub::close()
_replicas_lock.lock_write();
// task will automatically remove this replica from _closing_replicas
if (!_closing_replicas.empty()) {
- dassert(tmp_gpid != _closing_replicas.begin()->first,
- "this replica '%s' should have been removed from _closing_replicas",
- tmp_gpid.to_string());
+ CHECK_NE_MSG(tmp_gpid,
+ _closing_replicas.begin()->first,
+ "this replica '{}' should has been removed",
+ tmp_gpid.to_string());
}
}
@@ -2720,10 +2722,7 @@ std::string replica_stub::get_replica_dir(const char *app_type, gpid id, bool cr
for (const std::string &data_dir : _fs_manager.get_available_data_dirs()) {
std::string dir = utils::filesystem::path_combine(data_dir, gpid_str);
if (utils::filesystem::directory_exists(dir)) {
- if (is_dir_exist) {
- dassert(
- false, "replica dir conflict: %s <--> %s", dir.c_str(), replica_dir.c_str());
- }
+ CHECK(!is_dir_exist, "replica dir conflict: {} <--> {}", dir, replica_dir);
replica_dir = dir;
is_dir_exist = true;
}
diff --git a/src/replica/storage/simple_kv/simple_kv.server.impl.cpp b/src/replica/storage/simple_kv/simple_kv.server.impl.cpp
index d6dc2590c..abb41a07e 100644
--- a/src/replica/storage/simple_kv/simple_kv.server.impl.cpp
+++ b/src/replica/storage/simple_kv/simple_kv.server.impl.cpp
@@ -114,9 +114,9 @@ void simple_kv_service_impl::on_append(const kv_pair &pr, ::dsn::rpc_replier<int
{
zauto_lock l(_lock);
if (clear_state) {
- if (!dsn::utils::filesystem::remove_path(_dir_data)) {
- dassert(false, "Fail to delete directory %s.", _dir_data.c_str());
- }
+ CHECK(dsn::utils::filesystem::remove_path(_dir_data),
+ "Fail to delete directory {}",
+ _dir_data);
reset_state();
}
}
@@ -136,9 +136,9 @@ void simple_kv_service_impl::recover()
std::vector<std::string> sub_list;
std::string path = _dir_data;
- if (!dsn::utils::filesystem::get_subfiles(path, sub_list, false)) {
- dassert(false, "Fail to get subfiles in %s.", path.c_str());
- }
+ CHECK(dsn::utils::filesystem::get_subfiles(path, sub_list, false),
+ "Fail to get subfiles in {}",
+ path);
for (auto &fpath : sub_list) {
auto &&s = dsn::utils::filesystem::get_file_name(fpath);
if (s.substr(0, strlen("checkpoint.")) != std::string("checkpoint."))
@@ -173,7 +173,7 @@ void simple_kv_service_impl::recover(const std::string &name, int64_t version)
is.read((char *)&count, sizeof(count));
is.read((char *)&magic, sizeof(magic));
- dassert(magic == 0xdeadbeef, "invalid checkpoint");
+ CHECK_EQ_MSG(magic, 0xdeadbeef, "invalid checkpoint");
for (uint64_t i = 0; i < count; i++) {
std::string key;
@@ -204,7 +204,7 @@ void simple_kv_service_impl::recover(const std::string &name, int64_t version)
zauto_lock l(_lock);
if (last_commit == last_durable_decree()) {
- dassert(utils::filesystem::file_exists(name), "checkpoint file %s is missing!", name);
+ CHECK(utils::filesystem::file_exists(name), "checkpoint file {} is missing!", name);
return ERR_OK;
}
@@ -269,9 +269,8 @@ void simple_kv_service_impl::recover(const std::string &name, int64_t version)
recover(state.files[0], state.to_decree_included);
return ERR_OK;
} else {
- dassert(chkpt_apply_mode::copy == mode, "invalid mode %d", (int)mode);
- dassert(state.to_decree_included > last_durable_decree(),
- "checkpoint's decree is smaller than current");
+ CHECK_EQ_MSG(chkpt_apply_mode::copy, mode, "invalid mode");
+ CHECK_GT(state.to_decree_included, last_durable_decree());
char name[256];
sprintf(name, "%s/checkpoint.%" PRId64, _dir_data.c_str(), state.to_decree_included);
diff --git a/src/replica/storage/simple_kv/test/case.cpp b/src/replica/storage/simple_kv/test/case.cpp
index 9fac4baf8..b118582a8 100644
--- a/src/replica/storage/simple_kv/test/case.cpp
+++ b/src/replica/storage/simple_kv/test/case.cpp
@@ -903,7 +903,7 @@ void client_case_line::get_write_params(int &id,
std::string &value,
int &timeout_ms) const
{
- dassert(_type == begin_write, "");
+ CHECK_EQ(_type, begin_write);
id = _id;
key = _key;
value = _value;
@@ -912,7 +912,7 @@ void client_case_line::get_write_params(int &id,
void client_case_line::get_read_params(int &id, std::string &key, int &timeout_ms) const
{
- dassert(_type == begin_read, "");
+ CHECK_EQ(_type, begin_read);
id = _id;
key = _key;
timeout_ms = _timeout;
@@ -922,7 +922,7 @@ void client_case_line::get_replica_config_params(rpc_address &receiver,
dsn::replication::config_type::type &type,
rpc_address &node) const
{
- dassert(_type == replica_config, "");
+ CHECK_EQ(_type, replica_config);
receiver = _config_receiver;
type = _config_type;
node = _config_node;
@@ -1039,7 +1039,7 @@ bool test_case::init(const std::string &case_input)
void test_case::forward()
{
_null_loop_count = 0; // reset null loop count
- dassert(_next < _case_lines.size(), "");
+ CHECK_LT(_next, _case_lines.size());
while (true) {
case_line *cl = _case_lines[_next];
if (cl != nullptr) {
@@ -1078,7 +1078,7 @@ void test_case::forward()
void test_case::fail(const std::string &other)
{
_null_loop_count = 0; // reset null loop count
- dassert(_next < _case_lines.size(), "");
+ CHECK_LT(_next, _case_lines.size());
case_line *cl = _case_lines[_next];
output(other);
print(cl, other);
@@ -1097,8 +1097,8 @@ void test_case::output(const std::string &line)
void test_case::print(case_line *cl, const std::string &other, bool is_skip)
{
if (is_skip) {
- dassert(cl == nullptr, "");
- dassert(!other.empty(), "");
+ CHECK(cl == nullptr, "");
+ CHECK(!other.empty(), "");
std::cout << " s " << other << std::endl;
return;
}
@@ -1395,7 +1395,7 @@ void test_case::on_state_change(const state_snapshot &last, const state_snapshot
void test_case::internal_register_creator(const std::string &name, case_line_creator creator)
{
- dassert(_creators.find(name) == _creators.end(), "");
+ CHECK(_creators.find(name) == _creators.end(), "");
_creators[name] = creator;
}
}
diff --git a/src/replica/storage/simple_kv/test/checker.cpp b/src/replica/storage/simple_kv/test/checker.cpp
index e97ea4c0e..d2f27b641 100644
--- a/src/replica/storage/simple_kv/test/checker.cpp
+++ b/src/replica/storage/simple_kv/test/checker.cpp
@@ -290,7 +290,7 @@ void test_checker::get_current_states(state_snapshot &states)
for (auto &kv : app->_stub->_replicas) {
replica_ptr r = kv.second;
- dassert(kv.first == r->get_gpid(), "");
+ CHECK_EQ(kv.first, r->get_gpid());
replica_id id(r->get_gpid(), app->info().full_name);
replica_state &rs = states.state_map[id];
rs.id = id;
diff --git a/src/replica/storage/simple_kv/test/common.cpp b/src/replica/storage/simple_kv/test/common.cpp
index 94b804a7f..7c62d5bb0 100644
--- a/src/replica/storage/simple_kv/test/common.cpp
+++ b/src/replica/storage/simple_kv/test/common.cpp
@@ -246,10 +246,7 @@ std::string state_snapshot::diff_string(const state_snapshot &other) const
oss << add_mark << cur_it->second.to_string() << std::endl;
++cur_it;
} else {
- dassert(oth_it->first == cur_it->first,
- "invalid replica_id, %s VS %s",
- oth_it->first.to_string().c_str(),
- cur_it->first.to_string().c_str());
+ CHECK_EQ(oth_it->first, cur_it->first);
if (oth_it->second != cur_it->second) {
oss << chg_mark << cur_it->second.to_string()
<< " <= " << oth_it->second.to_string() << std::endl;
diff --git a/src/replica/storage/simple_kv/test/common.h b/src/replica/storage/simple_kv/test/common.h
index 06239c28c..432dfeb59 100644
--- a/src/replica/storage/simple_kv/test/common.h
+++ b/src/replica/storage/simple_kv/test/common.h
@@ -98,6 +98,10 @@ struct replica_id
bool operator!=(const replica_id &o) const { return !(*this == o); }
std::string to_string() const;
bool from_string(const std::string &str);
+ friend std::ostream &operator<<(std::ostream &os, const replica_id &rid)
+ {
+ return os << rid.to_string();
+ }
};
struct replica_state
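The friend operator<< added to replica_id above is presumably what lets CHECK_EQ(oth_it->first, cur_it->first) in common.cpp render both ids when the check fires: the logging macros need their operands to have a textual form, which a plain struct lacks by default. A minimal standalone illustration of the same idea (sample_id and everything around it is hypothetical, not part of the patch):

    #include <iostream>
    #include <sstream>
    #include <string>

    // Hypothetical stand-in for replica_id: a struct compared inside an
    // assertion macro needs a textual form to be shown on failure.
    struct sample_id
    {
        int app_id = 1;
        int partition_index = 0;
        std::string to_string() const
        {
            std::ostringstream oss;
            oss << app_id << "." << partition_index;
            return oss.str();
        }
        // Same shape as the operator<< above: delegate to to_string().
        friend std::ostream &operator<<(std::ostream &os, const sample_id &id)
        {
            return os << id.to_string();
        }
    };

    int main()
    {
        sample_id a, b;
        b.partition_index = 2;
        if (a.to_string() != b.to_string()) {
            // With operator<< the report can show "1.0 vs 1.2" rather than
            // failing with no detail.
            std::cerr << "check failed: " << a << " vs " << b << std::endl;
        }
        return 0;
    }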
diff --git a/src/replica/storage/simple_kv/test/simple_kv.server.impl.cpp b/src/replica/storage/simple_kv/test/simple_kv.server.impl.cpp
index 6c1c13095..ca9cbd20e 100644
--- a/src/replica/storage/simple_kv/test/simple_kv.server.impl.cpp
+++ b/src/replica/storage/simple_kv/test/simple_kv.server.impl.cpp
@@ -176,7 +176,7 @@ void simple_kv_service_impl::recover(const std::string &name, int64_t version)
is.read((char *)&count, sizeof(count));
is.read((char *)&magic, sizeof(magic));
- dassert(magic == 0xdeadbeef, "invalid checkpoint");
+ CHECK_EQ_MSG(magic, 0xdeadbeef, "invalid checkpoint");
for (uint64_t i = 0; i < count; i++) {
std::string key;
@@ -291,9 +291,8 @@ void simple_kv_service_impl::recover(const std::string &name, int64_t version)
// PRId64 "", last_committed_decree());
return ERR_OK;
} else {
- dassert(replication_app_base::chkpt_apply_mode::copy == mode, "invalid mode %d", (int)mode);
- dassert(state.to_decree_included > last_durable_decree(),
- "checkpoint's decree is smaller than current");
+ CHECK_EQ_MSG(replication_app_base::chkpt_apply_mode::copy, mode, "invalid mode");
+ CHECK_GT(state.to_decree_included, last_durable_decree());
char name[256];
sprintf(name, "%s/checkpoint.%" PRId64, data_dir().c_str(), state.to_decree_included);
diff --git a/src/runtime/global_config.cpp b/src/runtime/global_config.cpp
index 213529cb3..bb128f5ff 100644
--- a/src/runtime/global_config.cpp
+++ b/src/runtime/global_config.cpp
@@ -365,7 +365,7 @@ bool service_spec::init_app_specs()
// add app
app_specs.push_back(app);
- dassert((int)app_specs.size() == app.id, "incorrect app id");
+ CHECK_EQ(app_specs.size(), app.id);
// for next instance
app.ports.clear();
diff --git a/src/runtime/profiler.cpp b/src/runtime/profiler.cpp
index c2b5b006f..8bc44f3f4 100644
--- a/src/runtime/profiler.cpp
+++ b/src/runtime/profiler.cpp
@@ -58,6 +58,7 @@ START<== queue(server) == ENQUEUE <===== net(reply) ======= REPLY <=============
#include "profiler_header.h"
#include "utils/command_manager.h"
#include "perf_counter/perf_counter_wrapper.h"
+#include "utils/fmt_logging.h"
namespace dsn {
namespace tools {
@@ -126,7 +127,7 @@ static void profiler_on_task_create(task *caller, task *callee)
static void profiler_on_task_enqueue(task *caller, task *callee)
{
auto callee_code = callee->spec().code;
- dassert(callee_code >= 0 && callee_code <= s_task_code_max, "code = %d", callee_code.code());
+ CHECK(callee_code >= 0 && callee_code <= s_task_code_max, "code = {}", callee_code.code());
if (caller != nullptr) {
auto caller_code = caller->spec().code;
@@ -150,7 +151,8 @@ static void profiler_on_task_enqueue(task *caller, task *callee)
static void profiler_on_task_begin(task *this_)
{
auto code = this_->spec().code;
- dassert(code >= 0 && code <= s_task_code_max, "code = %d", code.code());
+ // TODO(yingchun): duplicate checks, should refactor later
+ CHECK(code >= 0 && code <= s_task_code_max, "code = {}", code.code());
uint64_t &qts = task_ext_for_profiler::get(this_);
uint64_t now = dsn_now_ns();
@@ -167,7 +169,7 @@ static void profiler_on_task_begin(task *this_)
static void profiler_on_task_end(task *this_)
{
auto code = this_->spec().code;
- dassert(code >= 0 && code <= s_task_code_max, "code = %d", code.code());
+ CHECK(code >= 0 && code <= s_task_code_max, "code = {}", code.code());
uint64_t qts = task_ext_for_profiler::get(this_);
uint64_t now = dsn_now_ns();
@@ -183,7 +185,7 @@ static void profiler_on_task_end(task *this_)
static void profiler_on_task_cancelled(task *this_)
{
auto code = this_->spec().code;
- dassert(code >= 0 && code <= s_task_code_max, "code = %d", code.code());
+ CHECK(code >= 0 && code <= s_task_code_max, "code = {}", code.code());
auto ptr = s_spec_profilers[code].ptr[TASK_CANCELLED].get();
if (ptr != nullptr)
@@ -207,9 +209,9 @@ static void profiler_on_aio_call(task *caller, aio_task *callee)
auto &prof = s_spec_profilers[caller_code];
if (prof.collect_call_count) {
auto callee_code = callee->spec().code;
- dassert(callee_code >= 0 && callee_code <= s_task_code_max,
- "code = %d",
- callee_code.code());
+ CHECK(callee_code >= 0 && callee_code <= s_task_code_max,
+ "code = {}",
+ callee_code.code());
prof.call_counts[callee_code]++;
}
}
@@ -221,7 +223,7 @@ static void profiler_on_aio_call(task *caller, aio_task *callee)
static void profiler_on_aio_enqueue(aio_task *this_)
{
auto code = this_->spec().code;
- dassert(code >= 0 && code <= s_task_code_max, "code = %d", code.code());
+ CHECK(code >= 0 && code <= s_task_code_max, "code = {}", code.code());
uint64_t &ats = task_ext_for_profiler::get(this_);
uint64_t now = dsn_now_ns();
@@ -246,9 +248,9 @@ static void profiler_on_rpc_call(task *caller, message_ex *req, rpc_response_tas
auto &prof = s_spec_profilers[caller_code];
if (prof.collect_call_count) {
- dassert(req->local_rpc_code >= 0 && req->local_rpc_code <= s_task_code_max,
- "code = %d",
- req->local_rpc_code.code());
+ CHECK(req->local_rpc_code >= 0 && req->local_rpc_code <= s_task_code_max,
+ "code = {}",
+ req->local_rpc_code.code());
prof.call_counts[req->local_rpc_code]++;
}
}
@@ -262,7 +264,7 @@ static void profiler_on_rpc_call(task *caller, message_ex *req, rpc_response_tas
static void profiler_on_rpc_request_enqueue(rpc_request_task *callee)
{
auto callee_code = callee->spec().code;
- dassert(callee_code >= 0 && callee_code <= s_task_code_max, "code = %d", callee_code.code());
+ CHECK(callee_code >= 0 && callee_code <= s_task_code_max, "code = {}", callee_code.code());
uint64_t now = dsn_now_ns();
task_ext_for_profiler::get(callee) = now;
@@ -296,13 +298,13 @@ static void profiler_on_rpc_create_response(message_ex *req, message_ex *resp)
static void profiler_on_rpc_reply(task *caller, message_ex *msg)
{
auto caller_code = caller->spec().code;
- dassert(caller_code >= 0 && caller_code <= s_task_code_max, "code = %d", caller_code.code());
+ CHECK(caller_code >= 0 && caller_code <= s_task_code_max, "code = {}", caller_code.code());
auto &prof = s_spec_profilers[caller_code];
if (prof.collect_call_count) {
- dassert(msg->local_rpc_code >= 0 && msg->local_rpc_code <= s_task_code_max,
- "code = %d",
- msg->local_rpc_code.code());
+ CHECK(msg->local_rpc_code >= 0 && msg->local_rpc_code <= s_task_code_max,
+ "code = {}",
+ msg->local_rpc_code.code());
prof.call_counts[msg->local_rpc_code]++;
}
@@ -311,7 +313,7 @@ static void profiler_on_rpc_reply(task *caller, message_ex *msg)
task_spec *spec = task_spec::get(msg->local_rpc_code);
dassert(spec != nullptr, "task_spec cannot be null, code = %d", msg->local_rpc_code.code());
auto code = spec->rpc_paired_code;
- dassert(code >= 0 && code <= s_task_code_max, "code = %d", code.code());
+ CHECK(code >= 0 && code <= s_task_code_max, "code = {}", code.code());
auto ptr = s_spec_profilers[code].ptr[RPC_SERVER_LATENCY_NS].get();
if (ptr != nullptr) {
ptr->set(now - qts);
@@ -325,7 +327,7 @@ static void profiler_on_rpc_reply(task *caller, message_ex *msg)
static void profiler_on_rpc_response_enqueue(rpc_response_task *resp)
{
auto resp_code = resp->spec().code;
- dassert(resp_code >= 0 && resp_code <= s_task_code_max, "code = %d", resp_code.code());
+ CHECK(resp_code >= 0 && resp_code <= s_task_code_max, "code = {}", resp_code.code());
uint64_t &cts = task_ext_for_profiler::get(resp);
uint64_t now = dsn_now_ns();
@@ -352,8 +354,7 @@ void profiler::install(service_spec &)
s_spec_profilers.reset(new task_spec_profiler[s_task_code_max + 1]);
task_ext_for_profiler::register_ext();
message_ext_for_profiler::register_ext();
- dassert(sizeof(counter_info_ptr) / sizeof(counter_info *) == PERF_COUNTER_COUNT,
- "PREF COUNTER ERROR");
+ CHECK_EQ(sizeof(counter_info_ptr) / sizeof(counter_info *), PERF_COUNTER_COUNT);
auto profile = dsn_config_get_value_bool(
"task..default", "is_profile", false, "whether to profile this kind of task");
diff --git a/src/runtime/rpc/asio_net_provider.cpp b/src/runtime/rpc/asio_net_provider.cpp
index d72ce1105..5cdb77991 100644
--- a/src/runtime/rpc/asio_net_provider.cpp
+++ b/src/runtime/rpc/asio_net_provider.cpp
@@ -30,6 +30,7 @@
#include "asio_net_provider.h"
#include "asio_rpc_session.h"
#include "utils/flags.h"
+#include "utils/fmt_logging.h"
namespace dsn {
namespace tools {
@@ -95,9 +96,9 @@ error_code asio_network_provider::start(rpc_channel channel, int port, bool clie
_acceptor = nullptr;
- dassert(channel == RPC_CHANNEL_TCP || channel == RPC_CHANNEL_UDP,
- "invalid given channel %s",
- channel.to_string());
+ CHECK(channel == RPC_CHANNEL_TCP || channel == RPC_CHANNEL_UDP,
+ "invalid given channel {}",
+ channel.to_string());
_address.assign_ipv4(get_local_ipv4(), port);
@@ -190,13 +191,13 @@ void asio_udp_provider::send_message(message_ex *request)
auto lcount = parser->get_buffer_count_on_send(request);
std::unique_ptr<message_parser::send_buf[]> bufs(new message_parser::send_buf[lcount]);
auto rcount = parser->get_buffers_on_send(request, bufs.get());
- dassert(lcount >= rcount, "%d VS %d", lcount, rcount);
+ CHECK_GE(lcount, rcount);
size_t tlen = 0, offset = 0;
for (int i = 0; i < rcount; i++) {
tlen += bufs[i].sz;
}
- dassert(tlen <= max_udp_packet_size, "the message is too large to send via a udp channel");
+ CHECK_LE_MSG(tlen, max_udp_packet_size, "the message is too large to send via a udp channel");
std::unique_ptr<char[]> packet_buffer(new char[tlen]);
for (int i = 0; i < rcount; i++) {
@@ -265,8 +266,9 @@ void asio_udp_provider::do_receive()
_recv_reader.truncate_read();
auto buffer_ptr = _recv_reader.read_buffer_ptr(max_udp_packet_size);
- dassert(_recv_reader.read_buffer_capacity() >= max_udp_packet_size,
- "failed to load enough buffer in parser");
+ CHECK_GE_MSG(_recv_reader.read_buffer_capacity(),
+ max_udp_packet_size,
+ "failed to load enough buffer in parser");
_socket->async_receive_from(
::boost::asio::buffer(buffer_ptr, max_udp_packet_size),
@@ -329,7 +331,7 @@ error_code asio_udp_provider::start(rpc_channel channel, int port, bool client_o
1,
"thread number for io service (timer and boost network)");
- dassert(channel == RPC_CHANNEL_UDP, "invalid given channel %s", channel.to_string());
+ CHECK_EQ(channel, RPC_CHANNEL_UDP);
if (client_only) {
do {
diff --git a/src/runtime/rpc/dsn_message_parser.cpp b/src/runtime/rpc/dsn_message_parser.cpp
index 319f56921..6c1cb86b5 100644
--- a/src/runtime/rpc/dsn_message_parser.cpp
+++ b/src/runtime/rpc/dsn_message_parser.cpp
@@ -112,7 +112,7 @@ void dsn_message_parser::prepare_on_send(message_ex *msg)
for (int i = 0; i <= i_max; i++) {
len += (size_t)buffers[i].length();
}
- dassert(len == (size_t)header->body_length + sizeof(message_header), "data length is wrong");
+ CHECK_EQ(len, header->body_length + sizeof(message_header));
#endif
if (task_spec::get(msg->local_rpc_code)->rpc_message_crc_required) {
@@ -140,7 +140,7 @@ void dsn_message_parser::prepare_on_send(message_ex *msg)
len += sz;
}
- dassert(len == (size_t)header->body_length, "data length is wrong");
+ CHECK_EQ(len, header->body_length);
header->body_crc32 = crc32;
}
@@ -200,8 +200,7 @@ int dsn_message_parser::get_buffers_on_send(message_ex *msg, /*out*/ send_buf *b
len += sz;
}
- dassert(len == (size_t)header->body_length, "data length is wrong");
-
+ CHECK_EQ(len, header->body_length);
bool r = (header->body_crc32 == crc32);
if (!r) {
LOG_ERROR("dsn message body crc check failed");
diff --git a/src/runtime/rpc/group_address.h b/src/runtime/rpc/group_address.h
index aa5b2ff77..ee05cce1f 100644
--- a/src/runtime/rpc/group_address.h
+++ b/src/runtime/rpc/group_address.h
@@ -121,7 +121,7 @@ inline rpc_group_address &rpc_group_address::operator=(const rpc_group_address &
inline bool rpc_group_address::add(rpc_address addr)
{
- dassert(addr.type() == HOST_TYPE_IPV4, "rpc group address member must be ipv4");
+ CHECK_EQ_MSG(addr.type(), HOST_TYPE_IPV4, "rpc group address member must be ipv4");
alw_t l(_lock);
if (_members.end() == std::find(_members.begin(), _members.end(), addr)) {
@@ -146,7 +146,7 @@ inline void rpc_group_address::set_leader(rpc_address addr)
if (addr.is_invalid()) {
_leader_index = -1;
} else {
- dassert(addr.type() == HOST_TYPE_IPV4, "rpc group address member must be ipv4");
+ CHECK_EQ_MSG(addr.type(), HOST_TYPE_IPV4, "rpc group address member must be ipv4");
for (int i = 0; i < (int)_members.size(); i++) {
if (_members[i] == addr) {
_leader_index = i;
diff --git a/src/runtime/rpc/message_parser.cpp b/src/runtime/rpc/message_parser.cpp
index 93a95b089..f846e5dad 100644
--- a/src/runtime/rpc/message_parser.cpp
+++ b/src/runtime/rpc/message_parser.cpp
@@ -161,12 +161,11 @@ char *message_reader::read_buffer_ptr(unsigned int read_next)
_buffer_occupied = rb.length();
}
- dassert(read_next + _buffer_occupied <= _buffer.length(),
- "%u(%u + %u) VS %u",
- read_next + _buffer_occupied,
- read_next,
- _buffer_occupied,
- _buffer.length());
+ CHECK_LE_MSG(read_next + _buffer_occupied,
+ _buffer.length(),
+ "read_next: {}, _buffer_occupied: {}",
+ read_next,
+ _buffer_occupied);
}
return (char *)(_buffer.data() + _buffer_occupied);
diff --git a/src/runtime/rpc/network.cpp b/src/runtime/rpc/network.cpp
index 2d3eca52d..98dd3c457 100644
--- a/src/runtime/rpc/network.cpp
+++ b/src/runtime/rpc/network.cpp
@@ -50,8 +50,8 @@ rpc_session::~rpc_session()
{
utils::auto_lock<utils::ex_lock_nr> l(_lock);
- dassert(0 == _sending_msgs.size(), "sending queue is not cleared yet");
- dassert(0 == _message_count, "sending queue is not cleared yet");
+ CHECK_EQ_MSG(0, _sending_msgs.size(), "sending queue is not cleared yet");
+ CHECK_EQ_MSG(0, _message_count, "sending queue is not cleared yet");
}
}
@@ -187,7 +187,7 @@ inline bool rpc_session::unlink_message_for_send()
_sending_buffers.resize(bcount + lcount);
auto rcount = _parser->get_buffers_on_send(lmsg, &_sending_buffers[bcount]);
- dassert(lcount >= rcount, "%d VS %d", lcount, rcount);
+ CHECK_GE(lcount, rcount);
if (lcount != rcount)
_sending_buffers.resize(bcount + rcount);
bcount += rcount;
@@ -308,13 +308,14 @@ void rpc_session::on_send_completed(uint64_t signature)
{
utils::auto_lock<utils::ex_lock_nr> l(_lock);
if (signature != 0) {
- dassert(_is_sending_next && signature == _message_sent + 1, "sent msg must be sending");
+ CHECK(_is_sending_next && signature == _message_sent + 1, "sent msg must be sending");
_is_sending_next = false;
// the _sending_msgs may have been cleared when reading of the rpc_session is failed.
if (_sending_msgs.size() == 0) {
- dassert(_connect_state == SS_DISCONNECTED,
- "assume sending queue is cleared due to session closed");
+ CHECK_EQ_MSG(_connect_state,
+ SS_DISCONNECTED,
+ "assume sending queue is cleared due to session closed");
return;
}
@@ -423,7 +424,7 @@ bool rpc_session::on_recv_message(message_ex *msg, int delay_ms)
if (is_client() && msg->header->from_address == _net.engine()->primary_address()) {
LOG_ERROR("self connection detected, address = %s",
msg->header->from_address.to_string());
- dassert(msg->get_count() == 0, "message should not be referenced by anybody so far");
+ CHECK_EQ_MSG(msg->get_count(), 0, "message should not be referenced by anybody so far");
delete msg;
return false;
}
diff --git a/src/runtime/rpc/network.sim.cpp b/src/runtime/rpc/network.sim.cpp
index 11eb4c7a3..72a72eea4 100644
--- a/src/runtime/rpc/network.sim.cpp
+++ b/src/runtime/rpc/network.sim.cpp
@@ -172,9 +172,9 @@ sim_network_provider::sim_network_provider(rpc_engine *rpc, network *inner_provi
error_code sim_network_provider::start(rpc_channel channel, int port, bool client_only)
{
- dassert(channel == RPC_CHANNEL_TCP || channel == RPC_CHANNEL_UDP,
- "invalid given channel %s",
- channel.to_string());
+ CHECK(channel == RPC_CHANNEL_TCP || channel == RPC_CHANNEL_UDP,
+ "invalid given channel {}",
+ channel.to_string());
_address = ::dsn::rpc_address("localhost", port);
auto hostname = boost::asio::ip::host_name();
diff --git a/src/runtime/rpc/rpc_engine.cpp b/src/runtime/rpc/rpc_engine.cpp
index 1441aa21e..2afc9f1c5 100644
--- a/src/runtime/rpc/rpc_engine.cpp
+++ b/src/runtime/rpc/rpc_engine.cpp
@@ -67,8 +67,8 @@ private:
rpc_client_matcher::~rpc_client_matcher()
{
for (int i = 0; i < MATCHER_BUCKET_NR; i++) {
- dassert(_requests[i].size() == 0,
- "all rpc entries must be removed before the matcher ends");
+ CHECK_EQ_MSG(
+ _requests[i].size(), 0, "all rpc entries must be removed before the matcher ends");
}
}
@@ -87,8 +87,8 @@ bool rpc_client_matcher::on_recv_reply(network *net, uint64_t key, message_ex *r
_requests[bucket_index].erase(it);
} else {
if (reply) {
- dassert(reply->get_count() == 0,
- "reply should not be referenced by anybody so far");
+ CHECK_EQ_MSG(
+ reply->get_count(), 0, "reply should not be referenced by anybody so far");
delete reply;
}
return false;
@@ -154,7 +154,7 @@ bool rpc_client_matcher::on_recv_reply(network *net, uint64_t key, message_ex *r
// TODO(qinzuoyan): reset timeout to new value
_engine->call_ip(addr, req, call, true);
- dassert(reply->get_count() == 0, "reply should not be referenced by anybody so far");
+ CHECK_EQ_MSG(reply->get_count(), 0, "reply should not be referenced by anybody so far");
delete reply;
} else {
// server address side effect
@@ -327,8 +327,8 @@ rpc_server_dispatcher::~rpc_server_dispatcher()
}
_vhandlers.clear();
_handlers.clear();
- dassert(_handlers.size() == 0,
- "please make sure all rpc handlers are unregistered at this point");
+ CHECK_EQ_MSG(
+ _handlers.size(), 0, "please make sure all rpc handlers are unregistered at this point");
}
bool rpc_server_dispatcher::register_rpc_handler(dsn::task_code code,
@@ -543,7 +543,7 @@ void rpc_engine::on_recv_request(network *net, message_ex *msg, int delay_ms)
msg->header->from_address.to_string(),
msg->header->trace_id);
- dassert(msg->get_count() == 0, "request should not be referenced by anybody so far");
+ CHECK_EQ_MSG(msg->get_count(), 0, "request should not be referenced by anybody so far");
delete msg;
return;
}
@@ -591,7 +591,7 @@ void rpc_engine::on_recv_request(network *net, message_ex *msg, int delay_ms)
msg->header->from_address.to_string(),
msg->header->trace_id);
- dassert(msg->get_count() == 0, "request should not be referenced by anybody so far");
+ CHECK_EQ_MSG(msg->get_count(), 0, "request should not be referenced by anybody so far");
msg->add_ref();
dsn_rpc_reply(msg->create_response(), ::dsn::ERR_HANDLER_NOT_FOUND);
msg->release_ref();
@@ -602,7 +602,7 @@ void rpc_engine::on_recv_request(network *net, message_ex *msg, int delay_ms)
msg->header->from_address.to_string(),
msg->header->trace_id);
- dassert(msg->get_count() == 0, "request should not be referenced by anybody so far");
+ CHECK_EQ_MSG(msg->get_count(), 0, "request should not be referenced by anybody so far");
msg->add_ref();
dsn_rpc_reply(msg->create_response(), ::dsn::ERR_HANDLER_NOT_FOUND);
msg->release_ref();
diff --git a/src/runtime/rpc/rpc_holder.h b/src/runtime/rpc/rpc_holder.h
index ac888947c..238d5ee03 100644
--- a/src/runtime/rpc/rpc_holder.h
+++ b/src/runtime/rpc/rpc_holder.h
@@ -240,8 +240,8 @@ public:
using mail_box_u_ptr = std::unique_ptr<mail_box_t>;
static void enable_mocking()
{
- dassert(_mail_box == nullptr && _forward_mail_box == nullptr,
- "remember to call clear_mocking_env after testing");
+ CHECK(_mail_box == nullptr && _forward_mail_box == nullptr,
+ "remember to call clear_mocking_env after testing");
_mail_box = make_unique<mail_box_t>();
_forward_mail_box = make_unique<mail_box_t>();
}
diff --git a/src/runtime/rpc/rpc_message.cpp b/src/runtime/rpc/rpc_message.cpp
index cf7beb77d..1f799ac24 100644
--- a/src/runtime/rpc/rpc_message.cpp
+++ b/src/runtime/rpc/rpc_message.cpp
@@ -264,9 +264,9 @@ message_ex *message_ex::copy_and_prepare_send(bool clone_content)
if (_is_read) {
// the message_header is hidden ahead of the buffer, expose it to buffer
- dassert(buffers.size() == 1, "there must be only one buffer for read msg");
- dassert((char *)header + sizeof(message_header) == (char *)buffers[0].data(),
- "header and content must be contigous");
+ CHECK_EQ_MSG(buffers.size(), 1, "there must be only one buffer for read msg");
+ CHECK((char *)header + sizeof(message_header) == (char *)buffers[0].data(),
+ "header and content must be contigous");
copy->buffers[0] = copy->buffers[0].range(-(int)sizeof(message_header));
@@ -406,8 +406,7 @@ void message_ex::write_next(void **ptr, size_t *size, size_t min_size)
this->_rw_offset = 0;
this->buffers.push_back(buffer);
- dassert(this->_rw_index + 1 == (int)this->buffers.size(),
- "message write buffer count is not right");
+ CHECK_EQ_MSG(_rw_index + 1, buffers.size(), "message write buffer count is not right");
}
void message_ex::write_commit(size_t size)
diff --git a/src/runtime/rpc/thrift_message_parser.cpp b/src/runtime/rpc/thrift_message_parser.cpp
index 4838082b6..fa69def91 100644
--- a/src/runtime/rpc/thrift_message_parser.cpp
+++ b/src/runtime/rpc/thrift_message_parser.cpp
@@ -354,11 +354,11 @@ void thrift_message_parser::prepare_on_send(message_ex *msg)
int dsn_buf_count = 0;
while (dsn_size > 0 && dsn_buf_count < buffers.size()) {
blob &buf = buffers[dsn_buf_count];
- dassert(dsn_size >= buf.length(), "%u VS %u", dsn_size, buf.length());
+ CHECK_GE(dsn_size, buf.length());
dsn_size -= buf.length();
++dsn_buf_count;
}
- dassert(dsn_size == 0, "dsn_size = %u", dsn_size);
+ CHECK_EQ(dsn_size, 0);
// put header_bb and end_bb at the end
buffers.resize(dsn_buf_count);
@@ -379,7 +379,7 @@ int thrift_message_parser::get_buffers_on_send(message_ex *msg, /*out*/ send_buf
int dsn_buf_count = 0;
while (dsn_size > 0 && dsn_buf_count < msg_buffers.size()) {
blob &buf = msg_buffers[dsn_buf_count];
- dassert(dsn_size >= buf.length(), "%u VS %u", dsn_size, buf.length());
+ CHECK_GE(dsn_size, buf.length());
dsn_size -= buf.length();
++dsn_buf_count;
@@ -392,8 +392,8 @@ int thrift_message_parser::get_buffers_on_send(message_ex *msg, /*out*/ send_buf
offset = 0;
++i;
}
- dassert(dsn_size == 0, "dsn_size = %u", dsn_size);
- dassert(dsn_buf_count + 2 == msg_buffers.size(), "must have 2 more blob at the end");
+ CHECK_EQ(dsn_size, 0);
+ CHECK_EQ_MSG(dsn_buf_count + 2, msg_buffers.size(), "must have 2 more blobs at the end");
// set header
blob &header_bb = msg_buffers[dsn_buf_count];
diff --git a/src/runtime/service_api_c.cpp b/src/runtime/service_api_c.cpp
index b8591a3a1..d3faa8db1 100644
--- a/src/runtime/service_api_c.cpp
+++ b/src/runtime/service_api_c.cpp
@@ -124,9 +124,7 @@ DSN_API bool dsn_rpc_unregiser_handler(dsn::task_code code)
DSN_API void dsn_rpc_call(dsn::rpc_address server, dsn::rpc_response_task *rpc_call)
{
- dassert(rpc_call->spec().type == TASK_TYPE_RPC_RESPONSE,
- "invalid task_type, type = %s",
- enum_to_string(rpc_call->spec().type));
+ CHECK_EQ_MSG(rpc_call->spec().type, TASK_TYPE_RPC_RESPONSE, "invalid task_type");
auto msg = rpc_call->get_request();
msg->server_address = server;
@@ -200,7 +198,7 @@ NORETURN DSN_API void dsn_exit(int code)
DSN_API bool dsn_mimic_app(const char *app_role, int index)
{
auto worker = ::dsn::task::get_current_worker2();
- dassert(worker == nullptr, "cannot call dsn_mimic_app in rDSN threads");
+ CHECK(worker == nullptr, "cannot call dsn_mimic_app in rDSN threads");
auto cnode = ::dsn::task::get_current_node2();
if (cnode != nullptr) {
diff --git a/src/runtime/service_engine.cpp b/src/runtime/service_engine.cpp
index 10435d88a..49675a2b8 100644
--- a/src/runtime/service_engine.cpp
+++ b/src/runtime/service_engine.cpp
@@ -245,7 +245,7 @@ void service_engine::start_node(service_app_spec &app_spec)
auto node = std::make_shared<service_node>(app_spec);
error_code err = node->start();
- dassert_f(err == ERR_OK, "service node start failed, err = {}", err.to_string());
+ CHECK_EQ_MSG(err, ERR_OK, "service node start failed");
_nodes_by_app_id[node->id()] = node;
}
diff --git a/src/runtime/task/task.cpp b/src/runtime/task/task.cpp
index 330747dd1..880d5b3cd 100644
--- a/src/runtime/task/task.cpp
+++ b/src/runtime/task/task.cpp
@@ -57,10 +57,10 @@ __thread uint16_t tls_dsn_lower32_task_id_mask = 0;
tls_dsn.node_id = node->id();
if (worker != nullptr) {
- dassert(worker->pool()->node() == node,
- "worker not belonging to the given node: %s vs %s",
- worker->pool()->node()->full_name(),
- node->full_name());
+ CHECK(worker->pool()->node() == node,
+ "worker not belonging to the given node: {} vs {}",
+ worker->pool()->node()->full_name(),
+ node->full_name());
}
tls_dsn.node = node;
@@ -161,7 +161,8 @@ void task::exec_internal()
if (_state.compare_exchange_strong(
READY_STATE, TASK_STATE_RUNNING, std::memory_order_relaxed)) {
- dassert(tls_dsn.magic == 0xdeadbeef, "thread is not inited with task::set_tls_dsn_context");
+ CHECK_EQ_MSG(
+ tls_dsn.magic, 0xdeadbeef, "thread is not inited with task::set_tls_dsn_context");
task *parent_task = tls_dsn.current_task;
tls_dsn.current_task = this;
@@ -405,7 +406,7 @@ void task::enqueue(task_worker_pool *pool)
// fast execution
if (_is_null) {
- dassert(_node == task::get_current_node(), "");
+ CHECK(_node == task::get_current_node(), "");
exec_internal();
return;
}
diff --git a/src/runtime/task/task.h b/src/runtime/task/task.h
index cb94e5ffc..44260dd6f 100644
--- a/src/runtime/task/task.h
+++ b/src/runtime/task/task.h
@@ -498,9 +498,9 @@ public:
void replace_callback(rpc_response_handler &&cb)
{
task_state cur_state = state();
- dassert(cur_state == TASK_STATE_READY || cur_state == TASK_STATE_RUNNING,
- "invalid task_state: %s",
- enum_to_string(cur_state));
+ CHECK(cur_state == TASK_STATE_READY || cur_state == TASK_STATE_RUNNING,
+ "invalid task_state: {}",
+ enum_to_string(cur_state));
_cb = std::move(cb);
}
void replace_callback(const rpc_response_handler &cb)
diff --git a/src/runtime/task/task_engine.cpp b/src/runtime/task/task_engine.cpp
index fbfb0fcbc..a0f1ed730 100644
--- a/src/runtime/task/task_engine.cpp
+++ b/src/runtime/task/task_engine.cpp
@@ -120,8 +120,8 @@ void task_worker_pool::stop()
void task_worker_pool::add_timer(task *t)
{
- dassert(t->delay_milliseconds() > 0,
- "task delayed should be dispatched to timer service first");
+ CHECK_GT_MSG(
+ t->delay_milliseconds(), 0, "task delayed should be dispatched to timer service first");
unsigned int idx = (_spec.partitioned
? static_cast<unsigned int>(t->hash()) %
@@ -132,15 +132,15 @@ void task_worker_pool::add_timer(task *t)
void task_worker_pool::enqueue(task *t)
{
- dassert(t->spec().pool_code == spec().pool_code || t->spec().type == TASK_TYPE_RPC_RESPONSE,
- "Invalid thread pool used");
- dassert(t->delay_milliseconds() == 0,
- "task delayed should be dispatched to timer service first");
-
- dassert(_is_running,
- "worker pool %s must be started before enqueue task %s",
- spec().name.c_str(),
- t->spec().name.c_str());
+ CHECK(t->spec().pool_code == spec().pool_code || t->spec().type == TASK_TYPE_RPC_RESPONSE,
+ "Invalid thread pool used");
+ CHECK_EQ_MSG(
+ t->delay_milliseconds(), 0, "task delayed should be dispatched to timer service first");
+
+ CHECK(_is_running,
+ "worker pool {} must be started before enqueue task {}",
+ spec().name,
+ t->spec().name);
unsigned int idx =
(_spec.partitioned
? static_cast<unsigned int>(t->hash()) % static_cast<unsigned int>(_queues.size())
diff --git a/src/runtime/task/task_engine.sim.cpp b/src/runtime/task/task_engine.sim.cpp
index ed5e504ae..9ed8f2559 100644
--- a/src/runtime/task/task_engine.sim.cpp
+++ b/src/runtime/task/task_engine.sim.cpp
@@ -49,7 +49,7 @@ sim_task_queue::sim_task_queue(task_worker_pool *pool, int index, task_queue *in
void sim_task_queue::enqueue(task *t)
{
- dassert(0 == t->delay_milliseconds(), "delay time must be zero");
+ CHECK_EQ_MSG(0, t->delay_milliseconds(), "delay time must be zero");
if (_tasks.size() > 0) {
do {
int random_pos = rand::next_u32(0, 1000000);
@@ -146,7 +146,7 @@ void sim_lock_provider::lock()
_sema.wait(TIME_MS_MAX);
- dassert(-1 == _current_holder && _lock_depth == 0, "must be unlocked state");
+ CHECK(-1 == _current_holder && _lock_depth == 0, "must be unlocked state");
_current_holder = ctid;
++_lock_depth;
}
@@ -165,7 +165,7 @@ bool sim_lock_provider::try_lock()
bool r = _sema.wait(0);
if (r) {
- dassert(-1 == _current_holder && _lock_depth == 0, "must be unlocked state");
+ CHECK(-1 == _current_holder && _lock_depth == 0, "must be unlocked state");
_current_holder = ctid;
++_lock_depth;
}
@@ -178,8 +178,9 @@ void sim_lock_provider::unlock()
if (scheduler::is_scheduling())
return;
- dassert(::dsn::utils::get_current_tid() == _current_holder,
- "lock must be locked must current holder");
+ CHECK_EQ_MSG(::dsn::utils::get_current_tid(),
+ _current_holder,
+ "lock must be locked must current holder");
if (0 == --_lock_depth) {
_current_holder = -1;
@@ -208,7 +209,7 @@ void sim_lock_nr_provider::lock()
_sema.wait(TIME_MS_MAX);
- dassert(-1 == _current_holder && _lock_depth == 0, "must be unlocked state");
+ CHECK(-1 == _current_holder && _lock_depth == 0, "must be unlocked state");
_current_holder = ctid;
++_lock_depth;
}
@@ -224,7 +225,7 @@ bool sim_lock_nr_provider::try_lock()
bool r = _sema.wait(0);
if (r) {
- dassert(-1 == _current_holder && _lock_depth == 0, "must be unlocked state");
+ CHECK(-1 == _current_holder && _lock_depth == 0, "must be unlocked state");
_current_holder = ctid;
++_lock_depth;
}
@@ -237,8 +238,9 @@ void sim_lock_nr_provider::unlock()
if (scheduler::is_scheduling())
return;
- dassert(::dsn::utils::get_current_tid() == _current_holder,
- "lock must be locked must current holder");
+ CHECK_EQ_MSG(::dsn::utils::get_current_tid(),
+ _current_holder,
+ "lock must be locked must current holder");
if (0 == --_lock_depth) {
_current_holder = -1;
diff --git a/src/runtime/task/task_spec.cpp b/src/runtime/task/task_spec.cpp
index f0d000380..4970de0a3 100644
--- a/src/runtime/task/task_spec.cpp
+++ b/src/runtime/task/task_spec.cpp
@@ -165,11 +165,12 @@ task_spec::task_spec(int code,
on_rpc_response_enqueue((std::string(name) + std::string(".rpc.response.enqueue")).c_str()),
on_rpc_create_response((std::string(name) + std::string(".rpc.response.create")).c_str())
{
- dassert(strlen(name) < DSN_MAX_TASK_CODE_NAME_LENGTH,
- "task code name '%s' is too long: length must be smaller than "
- "DSN_MAX_TASK_CODE_NAME_LENGTH (%u)",
- name,
- DSN_MAX_TASK_CODE_NAME_LENGTH);
+ CHECK_LT_MSG(strlen(name),
+ DSN_MAX_TASK_CODE_NAME_LENGTH,
+ "task code name '{}' is too long: length must be smaller than "
+ "DSN_MAX_TASK_CODE_NAME_LENGTH ({})",
+ name,
+ DSN_MAX_TASK_CODE_NAME_LENGTH);
rpc_call_channel = RPC_CHANNEL_TCP;
rpc_timeout_milliseconds = 5 * 1000; // 5 seconds
@@ -210,9 +211,9 @@ bool task_spec::init()
spec->allow_inline = true;
}
- dassert(spec->rpc_request_delays_milliseconds.size() == 0 ||
- spec->rpc_request_delays_milliseconds.size() == 6,
- "invalid length of rpc_request_delays_milliseconds, must be of length 6");
+ CHECK(spec->rpc_request_delays_milliseconds.size() == 0 ||
+ spec->rpc_request_delays_milliseconds.size() == 6,
+ "invalid length of rpc_request_delays_milliseconds, must be of length 6");
if (spec->rpc_request_delays_milliseconds.size() > 0) {
spec->rpc_request_delayer.initialize(spec->rpc_request_delays_milliseconds);
}
diff --git a/src/runtime/task/task_tracker.h b/src/runtime/task/task_tracker.h
index 5d26d4c1b..3e9cad63d 100644
--- a/src/runtime/task/task_tracker.h
+++ b/src/runtime/task/task_tracker.h
@@ -24,23 +24,15 @@
* THE SOFTWARE.
*/
-/*
- * Description:
- * tracker abstraction for tasks, to ensure the tasks are cancelled
- * appropriately when the context is gone
- *
- * Revision history:
- * Mar., 2015, @imzhenyu (Zhenyu Guo), first version
- * xxxx-xx-xx, author, fix bug about xxx
- */
-
#pragma once
+#include <atomic>
+
+#include "utils/api_utilities.h"
+#include "utils/error_code.h"
+#include "utils/fmt_logging.h"
#include "utils/link.h"
#include "utils/synchronize.h"
-#include "utils/error_code.h"
-#include "utils/api_utilities.h"
-#include <atomic>
namespace dsn {
//
@@ -184,7 +176,7 @@ private:
// ------- inlined implementation ----------
inline void trackable_task::set_tracker(task_tracker *owner, task *tsk)
{
- dassert(_owner == nullptr, "task tracker is already set");
+ CHECK(_owner == nullptr, "task tracker is already set");
_owner = owner;
_task = tsk;
_deleting_owner.store(OWNER_DELETE_NOT_LOCKED, std::memory_order_release);
diff --git a/src/runtime/task/task_worker.cpp b/src/runtime/task/task_worker.cpp
index 65b2fe153..39339dac9 100644
--- a/src/runtime/task/task_worker.cpp
+++ b/src/runtime/task/task_worker.cpp
@@ -135,13 +135,15 @@ void task_worker::set_priority(worker_priority_t pri)
void task_worker::set_affinity(uint64_t affinity)
{
#if defined(__linux__)
- dassert(affinity > 0, "affinity cannot be 0.");
+ CHECK_GT(affinity, 0);
int nr_cpu = static_cast<int>(std::thread::hardware_concurrency());
if (nr_cpu < 64) {
- dassert(affinity <= (((uint64_t)1 << nr_cpu) - 1),
- "There are %d cpus in total, while setting thread affinity to a nonexistent one.",
- nr_cpu);
+ CHECK_LE_MSG(
+ affinity,
+ (((uint64_t)1 << nr_cpu) - 1),
+ "There are {} cpus in total, while setting thread affinity to a nonexistent one.",
+ nr_cpu);
}
int err = 0;
@@ -229,10 +231,7 @@ void task_worker::loop()
}
#ifndef NDEBUG
- dassert(count == batch_size,
- "returned task count and batch size do not match: %d vs %d",
- count,
- batch_size);
+ CHECK_EQ_MSG(count, batch_size, "returned task count and batch size do not match");
#endif
_processed_task_count += batch_size;
diff --git a/src/runtime/tool_api.cpp b/src/runtime/tool_api.cpp
index 197f6eb83..9d3d7e0d2 100644
--- a/src/runtime/tool_api.cpp
+++ b/src/runtime/tool_api.cpp
@@ -58,7 +58,7 @@ public:
if (_start) {
error_code err;
err = _node->start_app();
- dassert(err == ERR_OK, "start app failed, err = %s", err.to_string());
+ CHECK_EQ_MSG(err, ERR_OK, "start app failed");
} else {
LOG_INFO("stop app result(%s)", _node->stop_app(_cleanup).to_string());
}
diff --git a/src/server/available_detector.cpp b/src/server/available_detector.cpp
index 6ee734edb..b2f65ab25 100644
--- a/src/server/available_detector.cpp
+++ b/src/server/available_detector.cpp
@@ -49,12 +49,12 @@ available_detector::available_detector()
_cluster_name = dsn::get_current_cluster_name();
_app_name = dsn_config_get_value_string(
"pegasus.collector", "available_detect_app", "", "available detector app name");
- dassert(_app_name.size() > 0, "");
+ CHECK(!_app_name.empty(), "");
_alert_script_dir = dsn_config_get_value_string("pegasus.collector",
"available_detect_alert_script_dir",
".",
"available detect alert script dir");
- dassert(_alert_script_dir.size() > 0, "");
+ CHECK(!_alert_script_dir.empty(), "");
_alert_email_address = dsn_config_get_value_string(
"pegasus.collector",
"available_detect_alert_email_address",
@@ -62,7 +62,7 @@ available_detector::available_detector()
"available detect alert email address, empty means not send email");
_meta_list.clear();
dsn::replication::replica_helper::load_meta_servers(_meta_list);
- dassert(_meta_list.size() > 0, "");
+ CHECK(!_meta_list.empty(), "");
_detect_interval_seconds =
(uint32_t)dsn_config_get_value_uint64("pegasus.collector",
"available_detect_interval_seconds",
diff --git a/src/server/brief_stat.cpp b/src/server/brief_stat.cpp
index 8d3c8045d..c3e44781c 100644
--- a/src/server/brief_stat.cpp
+++ b/src/server/brief_stat.cpp
@@ -17,11 +17,13 @@
* under the License.
*/
+#include "brief_stat.h"
+
#include <iomanip>
-#include "utils/api_utilities.h"
-#include "perf_counter/perf_counters.h"
-#include "brief_stat.h"
+#include "perf_counter/perf_counters.h"
+#include "utils/api_utilities.h"
+#include "utils/fmt_logging.h"
namespace pegasus {
@@ -73,7 +75,7 @@ std::string get_brief_stat()
std::vector<bool> match_result;
dsn::perf_counters::instance().query_snapshot(stat_counters, iter, &match_result);
- dassert(stat_counters.size() == match_result.size(), "");
+ CHECK_EQ(stat_counters.size(), match_result.size());
for (int i = 0; i < match_result.size(); ++i) {
if (!match_result[i]) {
if (!first_item)
diff --git a/src/server/hashkey_transform.h b/src/server/hashkey_transform.h
index 2f99d9b0b..1debda1a9 100644
--- a/src/server/hashkey_transform.h
+++ b/src/server/hashkey_transform.h
@@ -48,8 +48,8 @@ public:
// hash_key_len is in big endian
uint16_t hash_key_len = dsn::endian::ntoh(*(uint16_t *)(src.data()));
- dassert(src.size() >= 2 + hash_key_len,
- "key length must be no less than (2 + hash_key_len)");
+ CHECK_GE_MSG(
+ src.size(), 2 + hash_key_len, "key length must be no less than (2 + hash_key_len)");
return rocksdb::Slice(src.data(), 2 + hash_key_len);
}
diff --git a/src/server/hotspot_partition_calculator.cpp b/src/server/hotspot_partition_calculator.cpp
index 3dd2ca607..83f487583 100644
--- a/src/server/hotspot_partition_calculator.cpp
+++ b/src/server/hotspot_partition_calculator.cpp
@@ -146,10 +146,12 @@ void hotspot_partition_calculator::update_hot_point(uint32_t data_type,
void hotspot_partition_calculator::data_analyse()
{
- dassert(_partitions_stat_histories.back().size() == _hot_points.size(),
- "The number of partitions in this table has changed, and hotspot analysis cannot be "
- "performed,in %s",
- _app_name.c_str());
+ CHECK_EQ_MSG(
+ _partitions_stat_histories.back().size(),
+ _hot_points.size(),
+ "The number of partitions in this table has changed, and hotspot analysis cannot be "
+ "performed, in {}",
+ _app_name);
std::vector<int> read_hot_points;
stat_histories_analyse(READ_HOTSPOT_DATA, read_hot_points);
diff --git a/src/server/pegasus_server_impl.cpp b/src/server/pegasus_server_impl.cpp
index 79980e781..ab3ed809a 100644
--- a/src/server/pegasus_server_impl.cpp
+++ b/src/server/pegasus_server_impl.cpp
@@ -2165,25 +2165,16 @@ pegasus_server_impl::storage_apply_checkpoint(chkpt_apply_mode mode,
int64_t ci = state.to_decree_included;
if (mode == chkpt_apply_mode::copy) {
- dassert(ci > last_durable_decree(),
- "state.to_decree_included(%" PRId64 ") <= last_durable_decree(%" PRId64 ")",
- ci,
- last_durable_decree());
+ CHECK_GT(ci, last_durable_decree());
auto learn_dir = ::dsn::utils::filesystem::remove_file_name(state.files[0]);
auto chkpt_dir = ::dsn::utils::filesystem::path_combine(data_dir(), chkpt_get_dir_name(ci));
if (::dsn::utils::filesystem::rename_path(learn_dir, chkpt_dir)) {
::dsn::utils::auto_lock<::dsn::utils::ex_lock_nr> l(_checkpoints_lock);
- dassert(ci > last_durable_decree(),
- "%" PRId64 " VS %" PRId64 "",
- ci,
- last_durable_decree());
+ CHECK_GT(ci, last_durable_decree());
_checkpoints.push_back(ci);
if (!_checkpoints.empty()) {
- dassert(ci > _checkpoints.back(),
- "%" PRId64 " VS %" PRId64 "",
- ci,
- _checkpoints.back());
+ CHECK_GT(ci, _checkpoints.back());
}
set_last_durable_decree(ci);
err = ::dsn::ERR_OK;
@@ -2243,7 +2234,7 @@ pegasus_server_impl::storage_apply_checkpoint(chkpt_apply_mode mode,
}
CHECK(_is_open, "");
- dassert(ci == last_durable_decree(), "%" PRId64 " VS %" PRId64 "", ci, last_durable_decree());
+ CHECK_EQ(ci, last_durable_decree());
LOG_INFO("%s: apply checkpoint succeed, last_durable_decree = %" PRId64,
replica_name(),
diff --git a/src/server/pegasus_server_impl_init.cpp b/src/server/pegasus_server_impl_init.cpp
index aa17fd647..d45ac3d5c 100644
--- a/src/server/pegasus_server_impl_init.cpp
+++ b/src/server/pegasus_server_impl_init.cpp
@@ -169,7 +169,7 @@ pegasus_server_impl::pegasus_server_impl(dsn::replication::replica *r)
100000000,
"get/multi-get operation duration exceed this threshold will be logged");
_slow_query_threshold_ns = _slow_query_threshold_ns_in_config;
- dassert(_slow_query_threshold_ns > 0, "slow query threshold must be greater than 0");
+ CHECK_GT(_slow_query_threshold_ns, 0);
_abnormal_get_size_threshold = dsn_config_get_value_uint64(
"pegasus.server",
"rocksdb_abnormal_get_size_threshold",
diff --git a/src/server/pegasus_server_write.cpp b/src/server/pegasus_server_write.cpp
index f3d8b21eb..bb989a6fc 100644
--- a/src/server/pegasus_server_write.cpp
+++ b/src/server/pegasus_server_write.cpp
@@ -61,7 +61,7 @@ int pegasus_server_write::on_batched_write_requests(dsn::message_ex **requests,
try {
auto iter = _non_batch_write_handlers.find(requests[0]->rpc_code());
if (iter != _non_batch_write_handlers.end()) {
- dassert_f(count == 1, "count = {}", count);
+ CHECK_EQ(count, 1);
return iter->second(requests[0]);
}
} catch (TTransportException &ex) {
@@ -140,10 +140,10 @@ void pegasus_server_write::request_key_check(int64_t decree,
// TODO(wutao1): server should not assert when client's hash is incorrect.
if (msg->header->client.partition_hash != 0) {
uint64_t partition_hash = pegasus_key_hash(key);
- dassert(msg->header->client.partition_hash == partition_hash,
- "inconsistent partition hash");
+ CHECK_EQ_MSG(
+ msg->header->client.partition_hash, partition_hash, "inconsistent partition hash");
int thread_hash = get_gpid().thread_hash();
- dassert(msg->header->client.thread_hash == thread_hash, "inconsistent thread hash");
+ CHECK_EQ_MSG(msg->header->client.thread_hash, thread_hash, "inconsistent thread hash");
}
if (_verbose_log) {
diff --git a/src/server/pegasus_write_service.cpp b/src/server/pegasus_write_service.cpp
index dc0a24333..1b4cf0974 100644
--- a/src/server/pegasus_write_service.cpp
+++ b/src/server/pegasus_write_service.cpp
@@ -234,8 +234,8 @@ int pegasus_write_service::check_and_mutate(int64_t decree,
void pegasus_write_service::batch_prepare(int64_t decree)
{
- dassert(_batch_start_time == 0,
- "batch_prepare and batch_commit/batch_abort must be called in pair");
+ CHECK_EQ_MSG(
+ _batch_start_time, 0, "batch_prepare and batch_commit/batch_abort must be called in pair");
_batch_start_time = dsn_now_ns();
}
diff --git a/src/server/pegasus_write_service_impl.h b/src/server/pegasus_write_service_impl.h
index 70b90cc29..6fb15a0c5 100644
--- a/src/server/pegasus_write_service_impl.h
+++ b/src/server/pegasus_write_service_impl.h
@@ -446,9 +446,7 @@ public:
resp.error = _rocksdb_wrapper->write_batch_put(
decree, key, m.value, static_cast<uint32_t>(m.set_expire_ts_seconds));
} else {
- dassert_f(m.operation == ::dsn::apps::mutate_operation::MO_DELETE,
- "m.operation = %d",
- m.operation);
+ CHECK_EQ(m.operation, ::dsn::apps::mutate_operation::MO_DELETE);
resp.error = _rocksdb_wrapper->write_batch_delete(decree, key);
}
diff --git a/src/shell/command_helper.h b/src/shell/command_helper.h
index c1185d244..aff9b2adf 100644
--- a/src/shell/command_helper.h
+++ b/src/shell/command_helper.h
@@ -196,7 +196,7 @@ struct scan_data_context
{
// max_batch_count should > 1 because scan may be terminated
// when split_request_count = 1
- dassert(max_batch_count > 1, "");
+ CHECK_GT(max_batch_count, 1);
}
void set_sort_key_filter(pegasus::pegasus_client::filter_type type, const std::string &pattern)
{
@@ -1014,11 +1014,8 @@ get_app_partitions(shell_context *sc,
LOG_ERROR("list app %s failed, error = %s", app.app_name.c_str(), err.to_string());
return false;
}
- dassert(app_id == app.app_id, "%d VS %d", app_id, app.app_id);
- dassert(partition_count == app.partition_count,
- "%d VS %d",
- partition_count,
- app.partition_count);
+ CHECK_EQ(app_id, app.app_id);
+ CHECK_EQ(partition_count, app.partition_count);
}
return true;
}
@@ -1200,11 +1197,8 @@ get_app_stat(shell_context *sc, const std::string &app_name, std::vector<row_dat
LOG_ERROR("list app %s failed, error = %s", app_name.c_str(), err.to_string());
return false;
}
- dassert(app_id == app_info->app_id, "%d VS %d", app_id, app_info->app_id);
- dassert(partition_count == app_info->partition_count,
- "%d VS %d",
- partition_count,
- app_info->partition_count);
+ CHECK_EQ(app_id, app_info->app_id);
+ CHECK_EQ(partition_count, app_info->partition_count);
for (int i = 0; i < nodes.size(); ++i) {
dsn::rpc_address node_addr = nodes[i].address;
@@ -1217,8 +1211,8 @@ get_app_stat(shell_context *sc, const std::string &app_name, std::vector<row_dat
bool parse_ret = parse_app_pegasus_perf_counter_name(
m.name, app_id_x, partition_index_x, counter_name);
CHECK(parse_ret, "name = {}", m.name);
- dassert(app_id_x == app_id, "name = %s", m.name.c_str());
- dassert(partition_index_x < partition_count, "name = %s", m.name.c_str());
+ CHECK_EQ_MSG(app_id_x, app_id, "name = {}", m.name);
+ CHECK_LT_MSG(partition_index_x, partition_count, "name = {}", m.name);
if (partitions[partition_index_x].primary != node_addr)
continue;
update_app_pegasus_perf_counter(rows[partition_index_x], counter_name, m.value);
diff --git a/src/shell/commands/data_operations.cpp b/src/shell/commands/data_operations.cpp
index 139203842..e9adb0df5 100644
--- a/src/shell/commands/data_operations.cpp
+++ b/src/shell/commands/data_operations.cpp
@@ -2570,7 +2570,7 @@ bool count_data(command_executor *e, shell_context *sc, arguments args)
std::string unescape_str(const char *escaped)
{
std::string dst, src = escaped;
- dassert(pegasus::utils::c_unescape_string(src, dst) >= 0, "");
+ CHECK_GE(pegasus::utils::c_unescape_string(src, dst), 0);
return dst;
}
diff --git a/src/test/kill_test/data_verifier.cpp b/src/test/kill_test/data_verifier.cpp
index 73d64d49e..d4b5548f6 100644
--- a/src/test/kill_test/data_verifier.cpp
+++ b/src/test/kill_test/data_verifier.cpp
@@ -46,6 +46,7 @@
#include "pegasus/client.h"
#include "data_verifier.h"
+#include "utils/fmt_logging.h"
using namespace std;
using namespace ::pegasus;
@@ -349,7 +350,7 @@ void do_mark()
while (true) {
sleep(1);
long long new_id = get_min_thread_setting_id();
- dassert(new_id >= old_id, "%" PRId64 " VS %" PRId64 "", new_id, old_id);
+ CHECK_GE(new_id, old_id);
if (new_id == old_id) {
continue;
}
diff --git a/src/test/kill_test/killer_handler_shell.cpp b/src/test/kill_test/killer_handler_shell.cpp
index a18e9d2f7..315ece9da 100644
--- a/src/test/kill_test/killer_handler_shell.cpp
+++ b/src/test/kill_test/killer_handler_shell.cpp
@@ -37,7 +37,7 @@ killer_handler_shell::killer_handler_shell()
const char *section = "killer.handler.shell";
_run_script_path = dsn_config_get_value_string(
section, "onebox_run_path", "~/pegasus/run.sh", "onebox run path");
- dassert(_run_script_path.size() > 0, "");
+ CHECK(!_run_script_path.empty(), "");
}
bool killer_handler_shell::has_meta_dumped_core(int index)
diff --git a/src/test/kill_test/process_kill_testor.cpp b/src/test/kill_test/process_kill_testor.cpp
index 1366970bc..c49df77d4 100644
--- a/src/test/kill_test/process_kill_testor.cpp
+++ b/src/test/kill_test/process_kill_testor.cpp
@@ -51,7 +51,7 @@ process_kill_testor::process_kill_testor(const char *config_file) : kill_testor(
// initialize killer_handler
std::string killer_name =
dsn_config_get_value_string(section, "killer_handler", "", "killer handler");
- dassert(killer_name.size() > 0, "");
+ CHECK(!killer_name.empty(), "");
_killer_handler.reset(killer_handler::new_handler(killer_name.c_str()));
CHECK(_killer_handler, "invalid killer_name({})", killer_name);
diff --git a/src/test/pressure_test/main.cpp b/src/test/pressure_test/main.cpp
index 7e0394527..d79df6317 100644
--- a/src/test/pressure_test/main.cpp
+++ b/src/test/pressure_test/main.cpp
@@ -249,7 +249,7 @@ int main(int argc, const char **argv)
value_len =
(int32_t)dsn_config_get_value_uint64("pressureclient", "value_len", 64, "value length");
- dassert(qps > 0, "qps must GT 0, but qps(%d)", qps);
+ CHECK_GT(qps, 0);
CHECK(!op_name.empty(), "must assign operation name");
LOG_INFO("pressureclient %s qps = %d", op_name.c_str(), qps);
diff --git a/src/utils/alloc.cpp b/src/utils/alloc.cpp
index b2028b636..a0b1afb66 100644
--- a/src/utils/alloc.cpp
+++ b/src/utils/alloc.cpp
@@ -42,7 +42,7 @@ namespace dsn {
// [ENOMEM]
// There is insufficient memory available with the requested alignment.
// Thus making an assertion here is enough.
- dassert_f(err == 0, "error calling posix_memalign: {}", utils::safe_strerror(err).c_str());
+ CHECK_EQ_MSG(err, 0, "error calling posix_memalign: {}", utils::safe_strerror(err));
return buffer;
}
diff --git a/src/utils/alloc.h b/src/utils/alloc.h
index df07d1982..9e7a98378 100644
--- a/src/utils/alloc.h
+++ b/src/utils/alloc.h
@@ -51,16 +51,17 @@ cacheline_aligned_ptr<T> cacheline_aligned_alloc_array(size_t len)
if (sizeof(T) <= CACHELINE_SIZE && (sizeof(T) & (sizeof(T) - 1)) == 0) {
for (size_t i = 0; i < len; ++i) {
T *elem = &(array[i]);
- dassert_f((reinterpret_cast<const uintptr_t>(elem) & (sizeof(T) - 1)) == 0,
- "unaligned array element for cache line: array={}, length={}, index={}, "
- "elem={}, elem_size={}, mask={}, cacheline_size={}",
- fmt::ptr(array),
- len,
- i,
- fmt::ptr(elem),
- sizeof(T),
- sizeof(T) - 1,
- CACHELINE_SIZE);
+ CHECK_EQ_MSG((reinterpret_cast<const uintptr_t>(elem) & (sizeof(T) - 1)),
+ 0,
+ "unaligned array element for cache line: array={}, length={}, index={}, "
+ "elem={}, elem_size={}, mask={}, cacheline_size={}",
+ fmt::ptr(array),
+ len,
+ i,
+ fmt::ptr(elem),
+ sizeof(T),
+ sizeof(T) - 1,
+ CACHELINE_SIZE);
}
}
#endif
diff --git a/src/utils/command_manager.cpp b/src/utils/command_manager.cpp
index 5ec71d593..d21127708 100644
--- a/src/utils/command_manager.cpp
+++ b/src/utils/command_manager.cpp
@@ -25,11 +25,12 @@
*/
#include <iostream>
-#include <thread>
#include <sstream>
+#include <thread>
-#include "utils/utils.h"
#include "utils/command_manager.h"
+#include "utils/fmt_logging.h"
+#include "utils/utils.h"
namespace dsn {
@@ -45,7 +46,7 @@ dsn_handle_t command_manager::register_command(const std::vector<std::string> &c
if (!cmd.empty()) {
is_valid_cmd = true;
auto it = _handlers.find(cmd);
- dassert(it == _handlers.end(), "command '%s' already regisered", cmd.c_str());
+ CHECK(it == _handlers.end(), "command '{}' already registered", cmd);
}
}
dassert(is_valid_cmd, "should not register empty command");
diff --git a/src/utils/filesystem.cpp b/src/utils/filesystem.cpp
index 52c88a42c..781116928 100644
--- a/src/utils/filesystem.cpp
+++ b/src/utils/filesystem.cpp
@@ -671,7 +671,7 @@ error_code get_process_image_path(int pid, std::string &path)
err = snprintf_p(
tmp, ARRAYSIZE(tmp), "/proc/%s/exe", (pid == -1) ? "self" : std::to_string(pid).c_str());
- dassert(err >= 0, "snprintf_p failed.");
+ CHECK_GE(err, 0);
err = (int)readlink(tmp, tls_path_buffer, TLS_PATH_BUFFER_SIZE);
if (err == -1) {
@@ -806,11 +806,12 @@ error_code read_file(const std::string &fname, std::string &buf)
return ERR_FILE_OPERATION_FAILED;
}
fin.read(&buf[0], file_sz);
- dassert_f(file_sz == fin.gcount(),
- "read file({}) failed, file_size = {} but read size = {}",
- fname,
- file_sz,
- fin.gcount());
+ CHECK_EQ_MSG(file_sz,
+ fin.gcount(),
+ "read file({}) failed, file_size = {} but read size = {}",
+ fname,
+ file_sz,
+ fin.gcount());
fin.close();
return ERR_OK;
}
diff --git a/src/utils/fmt_logging.h b/src/utils/fmt_logging.h
index ebb886560..3662ec927 100644
--- a/src/utils/fmt_logging.h
+++ b/src/utils/fmt_logging.h
@@ -59,17 +59,90 @@
// Macros to check expected condition. It will abort the application
// and log a fatal message when the condition is not met.
-#define CHECK_EQ(var1, var2) CHECK(var1 == var2, "{} vs {}", var1, var2)
-#define CHECK_GE(var1, var2) CHECK(var1 >= var2, "{} vs {}", var1, var2)
-#define CHECK_LE(var1, var2) CHECK(var1 <= var2, "{} vs {}", var1, var2)
-#define CHECK_GT(var1, var2) CHECK(var1 > var2, "{} vs {}", var1, var2)
-#define CHECK_LT(var1, var2) CHECK(var1 < var2, "{} vs {}", var1, var2)
-
-#define CHECK_EQ_PREFIX(var1, var2) dassert_replica(var1 == var2, "{} vs {}", var1, var2)
-#define CHECK_GE_PREFIX(var1, var2) dassert_replica(var1 >= var2, "{} vs {}", var1, var2)
-#define CHECK_LE_PREFIX(var1, var2) dassert_replica(var1 <= var2, "{} vs {}", var1, var2)
-#define CHECK_GT_PREFIX(var1, var2) dassert_replica(var1 > var2, "{} vs {}", var1, var2)
-#define CHECK_LT_PREFIX(var1, var2) dassert_replica(var1 < var2, "{} vs {}", var1, var2)
+#define CHECK_NE_MSG(var1, var2, ...) \
+ do { \
+ const auto &_v1 = (var1); \
+ const auto &_v2 = (var2); \
+ CHECK(_v1 != _v2, "{} vs {} {}", _v1, _v2, fmt::format(__VA_ARGS__)); \
+ } while (false)
+
+#define CHECK_EQ_MSG(var1, var2, ...) \
+ do { \
+ const auto &_v1 = (var1); \
+ const auto &_v2 = (var2); \
+ CHECK(_v1 == _v2, "{} vs {} {}", _v1, _v2, fmt::format(__VA_ARGS__)); \
+ } while (false)
+
+#define CHECK_GE_MSG(var1, var2, ...) \
+ do { \
+ const auto &_v1 = (var1); \
+ const auto &_v2 = (var2); \
+ CHECK(_v1 >= _v2, "{} vs {} {}", _v1, _v2, fmt::format(__VA_ARGS__)); \
+ } while (false)
+
+#define CHECK_LE_MSG(var1, var2, ...) \
+ do { \
+ const auto &_v1 = (var1); \
+ const auto &_v2 = (var2); \
+ CHECK(_v1 <= _v2, "{} vs {} {}", _v1, _v2, fmt::format(__VA_ARGS__)); \
+ } while (false)
+
+#define CHECK_GT_MSG(var1, var2, ...) \
+ do { \
+ const auto &_v1 = (var1); \
+ const auto &_v2 = (var2); \
+ CHECK(_v1 > _v2, "{} vs {} {}", _v1, _v2, fmt::format(__VA_ARGS__)); \
+ } while (false)
+
+#define CHECK_LT_MSG(var1, var2, ...) \
+ do { \
+ const auto &_v1 = (var1); \
+ const auto &_v2 = (var2); \
+ CHECK(_v1 < _v2, "{} vs {} {}", _v1, _v2, fmt::format(__VA_ARGS__)); \
+ } while (false)
+
+#define CHECK_NE(var1, var2) CHECK_NE_MSG(var1, var2, "")
+#define CHECK_EQ(var1, var2) CHECK_EQ_MSG(var1, var2, "")
+#define CHECK_GE(var1, var2) CHECK_GE_MSG(var1, var2, "")
+#define CHECK_LE(var1, var2) CHECK_LE_MSG(var1, var2, "")
+#define CHECK_GT(var1, var2) CHECK_GT_MSG(var1, var2, "")
+#define CHECK_LT(var1, var2) CHECK_LT_MSG(var1, var2, "")
+
+// TODO(yingchun): add CHECK_NULL(ptr), CHECK_OK(err), CHECK(cond)
+#define CHECK_EQ_PREFIX(var1, var2) \
+ do { \
+ const auto &_v1 = (var1); \
+ const auto &_v2 = (var2); \
+ dassert_replica(_v1 == _v2, "{} vs {}", _v1, _v2); \
+ } while (false)
+
+#define CHECK_GE_PREFIX(var1, var2) \
+ do { \
+ const auto &_v1 = (var1); \
+ const auto &_v2 = (var2); \
+ dassert_replica(_v1 >= _v2, "{} vs {}", _v1, _v2); \
+ } while (false)
+
+#define CHECK_LE_PREFIX(var1, var2) \
+ do { \
+ const auto &_v1 = (var1); \
+ const auto &_v2 = (var2); \
+ dassert_replica(_v1 <= _v2, "{} vs {}", _v1, _v2); \
+ } while (false)
+
+#define CHECK_GT_PREFIX(var1, var2) \
+ do { \
+ const auto &_v1 = (var1); \
+ const auto &_v2 = (var2); \
+ dassert_replica(_v1 > _v2, "{} vs {}", _v1, _v2); \
+ } while (false)
+
+#define CHECK_LT_PREFIX(var1, var2) \
+ do { \
+ const auto &_v1 = (var1); \
+ const auto &_v2 = (var2); \
+ dassert_replica(_v1 < _v2, "{} vs {}", _v1, _v2); \
+ } while (false)
// Return the given status if condition is not true.
#define ERR_LOG_AND_RETURN_NOT_TRUE(s, err, ...) \
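For context, the new CHECK_*_MSG macros bind each operand to a local reference before comparing, so every argument is evaluated exactly once even though it appears in both the comparison and the failure message. Below is a minimal standalone sketch of the same pattern; it uses fmt directly, and fputs/abort stand in for the project's fatal-log machinery, so the names SKETCH_CHECK and SKETCH_CHECK_EQ_MSG are illustrative assumptions, not the actual macros.
// Minimal sketch of the CHECK_EQ_MSG pattern above (not the project's actual macros).
#include <cstdio>
#include <cstdlib>
#include <fmt/format.h>
#define SKETCH_CHECK(expr, ...)                                                     \
    do {                                                                            \
        if (!(expr)) {                                                              \
            std::fputs(fmt::format(__VA_ARGS__).c_str(), stderr);                   \
            std::abort();                                                           \
        }                                                                           \
    } while (false)
// Each operand is bound to a local reference, so it is evaluated exactly once
// even though it appears in both the comparison and the failure message.
#define SKETCH_CHECK_EQ_MSG(var1, var2, ...)                                        \
    do {                                                                            \
        const auto &_v1 = (var1);                                                   \
        const auto &_v2 = (var2);                                                   \
        SKETCH_CHECK(_v1 == _v2, "{} vs {} {}", _v1, _v2, fmt::format(__VA_ARGS__)); \
    } while (false)
int main()
{
    int replica_count = 3;
    SKETCH_CHECK_EQ_MSG(replica_count, 3, "unexpected replica count for app {}", "temp");
    return 0;
}
With this shape, a call such as SKETCH_CHECK_EQ_MSG(counter(), 42, "...") invokes counter() once; substituting the expression textually into both the comparison and the message would invoke it twice.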
diff --git a/src/utils/long_adder.cpp b/src/utils/long_adder.cpp
index c5c550df1..f9789ff0c 100644
--- a/src/utils/long_adder.cpp
+++ b/src/utils/long_adder.cpp
@@ -73,7 +73,7 @@ cacheline_aligned_int64 *const kCellsLocked = reinterpret_cast<cacheline_aligned
// [ENOMEM]
// There is insufficient memory available with the requested alignment.
// Thus making an assertion here is enough.
- dassert_f(err == 0, "error calling posix_memalign: {}", utils::safe_strerror(err).c_str());
+ CHECK_EQ_MSG(err, 0, "error calling posix_memalign: {}", utils::safe_strerror(err));
cacheline_aligned_int64 *array = new (buffer) cacheline_aligned_int64[size];
for (uint32_t i = 0; i < size; ++i) {
diff --git a/src/utils/math.cpp b/src/utils/math.cpp
index 6ec52779c..f8ebf306a 100644
--- a/src/utils/math.cpp
+++ b/src/utils/math.cpp
@@ -15,10 +15,13 @@
// specific language governing permissions and limitations
// under the License.
-#include <numeric>
+#include "math.h"
+
#include <algorithm>
-#include <math.h>
+#include <numeric>
+
#include "utils/api_utilities.h"
+#include "utils/fmt_logging.h"
#include "utils/math.h"
namespace dsn {
@@ -26,7 +29,7 @@ namespace utils {
double mean_stddev(const std::vector<uint32_t> &result_set, bool partial_sample)
{
- dassert(result_set.size() > 1, "invalid sample data input for stddev");
+ CHECK_GT_MSG(result_set.size(), 1, "invalid sample data input for stddev");
double sum = std::accumulate(result_set.begin(), result_set.end(), 0.0);
double mean = sum / result_set.size();
diff --git a/src/utils/metrics.cpp b/src/utils/metrics.cpp
index a67331a88..07a1210f2 100644
--- a/src/utils/metrics.cpp
+++ b/src/utils/metrics.cpp
@@ -99,7 +99,7 @@ void metric_entity::set_attributes(attr_map &&attrs)
metric_entity_ptr metric_entity_prototype::instantiate(const std::string &id,
metric_entity::attr_map attrs) const
{
- dassert_f(attrs.find("entity") == attrs.end(), "{}'s attribute \"entity\" is reserved", id);
+ CHECK(attrs.find("entity") == attrs.end(), "{}'s attribute \"entity\" is reserved", id);
attrs["entity"] = _name;
return metric_registry::instance().find_or_create_entity(id, std::move(attrs));
@@ -243,9 +243,10 @@ void percentile_timer::on_timer(const boost::system::error_code &ec)
} while (0)
if (dsn_unlikely(!!ec)) {
- dassert_f(ec == boost::system::errc::operation_canceled,
- "failed to exec on_timer with an error that cannot be handled: {}",
- ec.message());
+ CHECK_EQ_MSG(ec,
+ boost::system::errc::operation_canceled,
+ "failed to exec on_timer with an error that cannot be handled: {}",
+ ec.message());
// Cancel can only be launched by close().
auto expected_state = state::kClosing;
diff --git a/src/utils/metrics.h b/src/utils/metrics.h
index c2082befe..87cbb3dc2 100644
--- a/src/utils/metrics.h
+++ b/src/utils/metrics.h
@@ -541,7 +541,7 @@ public:
// NOTICE: x MUST be a non-negative integer.
void increment_by(int64_t x)
{
- dassert_f(x >= 0, "delta({}) by increment for counter must be a non-negative integer", x);
+ CHECK_GE_MSG(x, 0, "delta({}) by increment for counter must be a non-negative integer", x);
_adder.increment_by(x);
}
@@ -782,10 +782,9 @@ protected:
_nth_element_finder(),
_timer()
{
- dassert(_sample_size > 0 && (_sample_size & (_sample_size - 1)) == 0,
- "sample_sizes should be > 0 and power of 2");
-
- dassert(_samples, "_samples should be valid pointer");
+ CHECK(_sample_size > 0 && (_sample_size & (_sample_size - 1)) == 0,
+ "sample_sizes should be > 0 and power of 2");
+ CHECK(_samples, "");
for (const auto &kth : kth_percentiles) {
_kth_percentile_bitset.set(static_cast<size_t>(kth));
diff --git a/src/utils/nth_element.h b/src/utils/nth_element.h
index e15893358..1ee161a12 100644
--- a/src/utils/nth_element.h
+++ b/src/utils/nth_element.h
@@ -89,7 +89,7 @@ public:
{
for (size_type i = 0; i < _nths.size();) {
auto nth_iter = begin + _nths[i];
- dassert_f(nth_iter >= first && nth_iter < last, "Invalid iterators for nth_element()");
+ CHECK(nth_iter >= first && nth_iter < last, "Invalid iterators for nth_element()");
std::nth_element(first, nth_iter, last, _comp);
_elements[i] = *nth_iter;
diff --git a/src/utils/output_utils.cpp b/src/utils/output_utils.cpp
index 5358c1cbe..03b1d64de 100644
--- a/src/utils/output_utils.cpp
+++ b/src/utils/output_utils.cpp
@@ -68,7 +68,7 @@ void table_printer::add_title(const std::string &title, alignment align)
void table_printer::add_column(const std::string &col_name, alignment align)
{
check_mode(data_mode::kMultiColumns);
- dassert(_matrix_data.size() == 1, "`add_column` must be called before real data appendding");
+ CHECK_EQ_MSG(_matrix_data.size(), 1, "'add_column' must be called before real data appending");
_max_col_width.push_back(col_name.length());
_align_left.push_back(align == alignment::kLeft);
append_data(col_name);
@@ -117,7 +117,7 @@ void table_printer::output_in_tabular(std::ostream &out) const
if (_mode == data_mode::kSingleColumn) {
separator = ": ";
} else {
- dassert(_mode == data_mode::kMultiColumns, "Unknown mode");
+ CHECK(_mode == data_mode::kMultiColumns, "Unknown mode");
}
if (!_name.empty()) {
@@ -138,7 +138,7 @@ void table_printer::append_string_data(const std::string &data)
{
_matrix_data.rbegin()->emplace_back(data);
int last_index = _matrix_data.rbegin()->size() - 1;
- dassert(last_index <= _max_col_width.size(), "column data exceed");
+ CHECK_LE_MSG(last_index, _max_col_width.size(), "column data exceed");
// update column max length
int &cur_len = _max_col_width[last_index];
@@ -153,7 +153,7 @@ void table_printer::check_mode(data_mode mode)
_mode = mode;
return;
}
- dassert(_mode == mode, "");
+ CHECK(_mode == mode, "");
}
void multi_table_printer::add(table_printer &&tp) { _tps.emplace_back(std::move(tp)); }
diff --git a/src/utils/rpc_address.h b/src/utils/rpc_address.h
index 20a859c70..662019096 100644
--- a/src/utils/rpc_address.h
+++ b/src/utils/rpc_address.h
@@ -183,6 +183,11 @@ public:
}
}
+ friend std::ostream &operator<<(std::ostream &os, const rpc_address &addr)
+ {
+ return os << addr.to_string();
+ }
+
// for serialization in thrift format
uint32_t read(::apache::thrift::protocol::TProtocol *iprot);
uint32_t write(::apache::thrift::protocol::TProtocol *oprot) const;
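The friend operator<< added here lets an rpc_address be rendered through any ostream-based sink, which is what the fmt-formatted CHECK_* failure messages elsewhere in this change rely on (whether fmt picks up operator<< automatically depends on the fmt version in use). A minimal standalone sketch of the idiom, with a hypothetical toy_address standing in for rpc_address:
#include <iostream>
#include <sstream>
#include <string>
// Hypothetical stand-in for dsn::rpc_address: a value type whose textual form
// is exposed via a friend operator<<, mirroring the change above.
class toy_address
{
public:
    toy_address(std::string host, int port) : _host(std::move(host)), _port(port) {}
    std::string to_string() const { return _host + ":" + std::to_string(_port); }
    friend std::ostream &operator<<(std::ostream &os, const toy_address &addr)
    {
        return os << addr.to_string();
    }
private:
    std::string _host;
    int _port;
};
int main()
{
    toy_address primary("127.0.0.1", 34601);
    std::ostringstream oss;
    oss << "primary = " << primary; // any ostream-based sink can now print the address
    std::cout << oss.str() << std::endl;
    return 0;
}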
diff --git a/src/utils/test/metrics_test.cpp b/src/utils/test/metrics_test.cpp
index ec3a080cd..511ecbfc0 100644
--- a/src/utils/test/metrics_test.cpp
+++ b/src/utils/test/metrics_test.cpp
@@ -676,7 +676,7 @@ void run_percentile(const metric_entity_ptr &my_entity,
const std::vector<T> &expected_elements,
Checker checker)
{
- dassert_f(num_threads > 0, "Invalid num_threads({})", num_threads);
+ CHECK_GT(num_threads, 0);
CHECK(data.size() <= sample_size && data.size() % num_threads == 0,
"Invalid arguments, data_size={}, sample_size={}, num_threads={}",
data.size(),
diff --git a/src/utils/thread_access_checker.cpp b/src/utils/thread_access_checker.cpp
index 0d09a064c..6ebe13261 100644
--- a/src/utils/thread_access_checker.cpp
+++ b/src/utils/thread_access_checker.cpp
@@ -23,9 +23,11 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
+
+#include "utils/api_utilities.h"
+#include "utils/fmt_logging.h"
#include "utils/process_utils.h"
#include "utils/thread_access_checker.h"
-#include "utils/api_utilities.h"
namespace dsn {
@@ -36,8 +38,9 @@ thread_access_checker::~thread_access_checker() { _access_thread_id_inited = fal
void thread_access_checker::only_one_thread_access()
{
if (_access_thread_id_inited) {
- dassert(::dsn::utils::get_current_tid() == _access_thread_id,
- "the service is assumed to be accessed by one thread only!");
+ CHECK_EQ_MSG(::dsn::utils::get_current_tid(),
+ _access_thread_id,
+ "the service is assumed to be accessed by one thread only!");
} else {
_access_thread_id = ::dsn::utils::get_current_tid();
_access_thread_id_inited = true;
diff --git a/src/zookeeper/lock_struct.cpp b/src/zookeeper/lock_struct.cpp
index e8b9dce11..c97f7665d 100644
--- a/src/zookeeper/lock_struct.cpp
+++ b/src/zookeeper/lock_struct.cpp
@@ -57,7 +57,7 @@ static const char *states[] = {
static inline const char *string_state(lock_state state)
{
- dassert(state < lock_state::state_count, "state = %d", (int)(state));
+ CHECK_LT(state, lock_state::state_count);
return states[state];
}
@@ -73,7 +73,7 @@ static bool is_zookeeper_timeout(int zookeeper_error)
if (code == allow_list[i]) \
break; \
} \
- dassert(i < allow_list_size, "invalid code(%s)", code_str); \
+ CHECK_LT_MSG(i, allow_list_size, "invalid code({})", code_str); \
} while (0)
#define __execute(cb, _this) tasking::enqueue(TASK_CODE_DLOCK, nullptr, cb, _this->hash())
@@ -368,11 +368,12 @@ void lock_struct::after_self_check(lock_struct_ptr _this,
_this->on_expire();
return;
}
- dassert(*value == _this->_myself._node_value,
- "lock(%s) get wrong value, local myself(%s), from zookeeper(%s)",
- _this->_lock_id.c_str(),
- _this->_myself._node_value.c_str(),
- value->c_str());
+ CHECK_EQ_MSG(*value,
+ _this->_myself._node_value,
+ "lock({}) get wrong value, local myself({}), from zookeeper({})",
+ _this->_lock_id,
+ _this->_myself._node_value,
+ *value);
}
void lock_struct::get_lock_owner(bool watch_myself)
@@ -486,10 +487,11 @@ void lock_struct::after_get_lockdir_nodes(lock_struct_ptr _this,
bool watch_myself = false;
if (min_seq == myself_seq) {
// i am the smallest one, so i get the lock :-)
- dassert(min_pos == my_pos,
- "same sequence node number on zookeeper, dir(%s), number(%d)",
- _this->_lock_dir.c_str(),
- myself_seq);
+ CHECK_EQ_MSG(min_pos,
+ my_pos,
+ "same sequence node number on zookeeper, dir({}), number({})",
+ _this->_lock_dir,
+ myself_seq);
_this->_state = lock_state::locked;
_this->_owner._node_value = _this->_myself._node_value;
_this->_dist_lock_service->refresh_lock_cache(
diff --git a/src/zookeeper/test/distributed_lock_zookeeper.cpp b/src/zookeeper/test/distributed_lock_zookeeper.cpp
index da6e7e62a..634f7a19e 100644
--- a/src/zookeeper/test/distributed_lock_zookeeper.cpp
+++ b/src/zookeeper/test/distributed_lock_zookeeper.cpp
@@ -77,7 +77,7 @@ public:
_dlock_service = new distributed_lock_service_zookeeper();
auto err = _dlock_service->initialize({"/dsn/tests/simple_adder_server"});
- dassert(err == ERR_OK, "err = %s", err.to_string());
+ CHECK_EQ(err, ERR_OK);
distributed_lock_service::lock_options opt = {true, true};
while (!ss_finish) {
diff --git a/src/zookeeper/zookeeper_session.cpp b/src/zookeeper/zookeeper_session.cpp
index 4a7007b4c..5bdb201e8 100644
--- a/src/zookeeper/zookeeper_session.cpp
+++ b/src/zookeeper/zookeeper_session.cpp
@@ -322,7 +322,7 @@ void zookeeper_session::global_watcher(
if (type != ZOO_SESSION_EVENT && path != nullptr)
LOG_INFO("watcher path: %s", path);
- dassert(zoo_session->_handle == handle, "");
+ CHECK(zoo_session->_handle == handle, "");
zoo_session->dispatch_event(type, state, type == ZOO_SESSION_EVENT ? "" : path);
}