Posted to commits@pegasus.apache.org by la...@apache.org on 2023/01/18 16:07:55 UTC

[incubator-pegasus] branch master updated: refactor(log): use LOG_WARNING_F instead of LOG_WARNING (2/3) (#1316)

This is an automated email from the ASF dual-hosted git repository.

laiyingchun pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-pegasus.git


The following commit(s) were added to refs/heads/master by this push:
     new 16d9e3253 refactor(log): use LOG_WARNING_F instead of LOG_WARNING (2/3) (#1316)
16d9e3253 is described below

commit 16d9e3253e25dcaae81e3096ed55d91b3d71b636
Author: WHBANG <38...@users.noreply.github.com>
AuthorDate: Thu Jan 19 00:07:49 2023 +0800

    refactor(log): use LOG_WARNING_F instead of LOG_WARNING (2/3) (#1316)
---
 src/meta/load_balance_policy.cpp          |   6 +-
 src/meta/meta_backup_service.cpp          |  70 +++++++++----------
 src/meta/meta_data.cpp                    |  11 ++-
 src/meta/meta_server_failure_detector.cpp |  13 ++--
 src/meta/meta_service.cpp                 |   2 +-
 src/meta/meta_state_service_zookeeper.cpp |   2 +-
 src/meta/partition_guardian.cpp           |  28 ++++----
 src/meta/server_state.cpp                 | 108 ++++++++++++++----------------
 8 files changed, 114 insertions(+), 126 deletions(-)
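
Every hunk below follows the same mechanical pattern: LOG_WARNING takes a
printf-style format string, so each argument has to be lowered to a C type by
hand (std::string via .c_str(), error_code and rpc_address via .to_string()),
while LOG_WARNING_F takes fmt-style {} placeholders and accepts the objects
directly. A minimal before/after sketch, using a call taken from the
meta_backup_service.cpp hunks below:

    // Before: printf-style. Rich types must be reduced to C-compatible
    // arguments manually, which is verbose and error-prone.
    LOG_WARNING("write %s failed, reason(%s), try it later",
                remote_file->file_name().c_str(),
                resp.err.to_string());

    // After: fmt-style. The {} placeholders are type-safe, and any type
    // that fmt knows how to print (via a formatter specialization or,
    // presumably, an ostream operator<<) is passed as-is, so the
    // .c_str()/.to_string() noise disappears.
    LOG_WARNING_F("write {} failed, reason({}), try it later",
                  remote_file->file_name(),
                  resp.err);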

diff --git a/src/meta/load_balance_policy.cpp b/src/meta/load_balance_policy.cpp
index bb43d44c7..75e74c314 100644
--- a/src/meta/load_balance_policy.cpp
+++ b/src/meta/load_balance_policy.cpp
@@ -61,10 +61,8 @@ bool calc_disk_load(node_mapper &nodes,
         const config_context &cc = *get_config_context(apps, pid);
         auto iter = cc.find_from_serving(node);
         if (iter == cc.serving.end()) {
-            LOG_WARNING("can't collect gpid(%d.%d)'s info from %s, which should be primary",
-                        pid.get_app_id(),
-                        pid.get_partition_index(),
-                        node.to_string());
+            LOG_WARNING_F(
+                "can't collect gpid({})'s info from {}, which should be primary", pid, node);
             return false;
         } else {
             load[iter->disk_tag]++;
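
Note how the fmt-style call above also collapses multi-argument expansions:
the printf version needed two %d slots fed by pid.get_app_id() and
pid.get_partition_index(), while a single {} placeholder takes the gpid
itself. For that to compile, dsn::gpid must be formattable by fmt; a minimal
sketch of what such wiring could look like (an illustrative assumption only,
the codebase may instead register the type through an ostream operator<<):

    #include <fmt/format.h>

    namespace fmt {
    // Hypothetical formatter specialization: renders a gpid as
    // "app_id.partition_index", matching the old "%d.%d" output.
    template <>
    struct formatter<dsn::gpid> : formatter<std::string>
    {
        template <typename FormatContext>
        auto format(const dsn::gpid &pid, FormatContext &ctx)
        {
            return formatter<std::string>::format(
                fmt::format("{}.{}", pid.get_app_id(), pid.get_partition_index()), ctx);
        }
    };
    } // namespace fmt

The same reasoning applies to the rpc_address and error_code arguments passed
directly to LOG_WARNING_F in the hunks that follow.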
diff --git a/src/meta/meta_backup_service.cpp b/src/meta/meta_backup_service.cpp
index fa2db91a1..2c7ba989a 100644
--- a/src/meta/meta_backup_service.cpp
+++ b/src/meta/meta_backup_service.cpp
@@ -58,9 +58,9 @@ void policy_context::start_backup_app_meta_unlocked(int32_t app_id)
     // if app is dropped when app is under backuping, we just skip backup this app this time, and
     // also we will not write backup-finish-flag on fds
     if (!app_available) {
-        LOG_WARNING(
-            "%s: can't encode app_info for app(%d), perhaps removed, treat it as backup finished",
-            _backup_sig.c_str(),
+        LOG_WARNING_F(
+            "{}: can't encode app_info for app({}), perhaps removed, treat it as backup finished",
+            _backup_sig,
             app_id);
         auto iter = _progress.unfinished_partitions_per_app.find(app_id);
         CHECK(iter != _progress.unfinished_partitions_per_app.end(),
@@ -134,9 +134,9 @@ void policy_context::start_backup_app_meta_unlocked(int32_t app_id)
                             resp.err.to_string());
                 return;
             } else {
-                LOG_WARNING("write %s failed, reason(%s), try it later",
-                            remote_file->file_name().c_str(),
-                            resp.err.to_string());
+                LOG_WARNING_F("write {} failed, reason({}), try it later",
+                              remote_file->file_name(),
+                              resp.err);
                 tasking::enqueue(LPC_DEFAULT_CALLBACK,
                                  &_tracker,
                                  [this, app_id]() {
@@ -166,7 +166,8 @@ void policy_context::write_backup_app_finish_flag_unlocked(int32_t app_id,
                                                            dsn::task_ptr write_callback)
 {
     if (_progress.is_app_skipped[app_id]) {
-        LOG_WARNING("app is unavaliable, skip write finish flag for this app(app_id = %d)", app_id);
+        LOG_WARNING_F("app is unavaliable, skip write finish flag for this app(app_id = {})",
+                      app_id);
         if (write_callback != nullptr) {
             write_callback->enqueue();
         }
@@ -250,9 +251,9 @@ void policy_context::write_backup_app_finish_flag_unlocked(int32_t app_id,
                             resp.err.to_string());
                 return;
             } else {
-                LOG_WARNING("write %s failed, reason(%s), try it later",
-                            remote_file->file_name().c_str(),
-                            resp.err.to_string());
+                LOG_WARNING_F("write {} failed, reason({}), try it later",
+                              remote_file->file_name(),
+                              resp.err);
                 tasking::enqueue(LPC_DEFAULT_CALLBACK,
                                  &_tracker,
                                  [this, app_id, write_callback]() {
@@ -357,9 +358,9 @@ void policy_context::write_backup_info_unlocked(const backup_info &b_info,
                             resp.err.to_string());
                 return;
             } else {
-                LOG_WARNING("write %s failed, reason(%s), try it later",
-                            remote_file->file_name().c_str(),
-                            resp.err.to_string());
+                LOG_WARNING_F("write {} failed, reason({}), try it later",
+                              remote_file->file_name(),
+                              resp.err);
                 tasking::enqueue(LPC_DEFAULT_CALLBACK,
                                  &_tracker,
                                  [this, b_info, write_callback]() {
@@ -569,12 +570,12 @@ void policy_context::initialize_backup_progress_unlocked()
         const std::shared_ptr<app_state> &app = _backup_service->get_state()->get_app(app_id);
         _progress.is_app_skipped[app_id] = true;
         if (app == nullptr) {
-            LOG_WARNING("%s: app id(%d) is invalid", _policy.policy_name.c_str(), app_id);
+            LOG_WARNING_F("{}: app id({}) is invalid", _policy.policy_name, app_id);
         } else if (app->status != app_status::AS_AVAILABLE) {
-            LOG_WARNING("%s: %s is not available, status(%s)",
-                        _policy.policy_name.c_str(),
-                        app->get_logname(),
-                        enum_to_string(app->status));
+            LOG_WARNING_F("{}: {} is not available, status({})",
+                          _policy.policy_name,
+                          app->get_logname(),
+                          enum_to_string(app->status));
         } else {
             // NOTICE: only available apps have entry in
             // unfinished_partitions_per_app & partition_progress & app_chkpt_size
@@ -621,7 +622,7 @@ void policy_context::sync_backup_to_remote_storage_unlocked(const backup_info &b
             if (sync_callback != nullptr) {
                 sync_callback->enqueue();
             } else {
-                LOG_WARNING("%s: empty callback", _policy.policy_name.c_str());
+                LOG_WARNING_F("{}: empty callback", _policy.policy_name);
             }
         } else if (ERR_TIMEOUT == err) {
             LOG_ERROR("%s: sync backup info(" PRId64
@@ -774,8 +775,8 @@ void policy_context::issue_new_backup_unlocked()
     // if all apps are dropped, we don't issue a new backup
     if (_progress.unfinished_partitions_per_app.empty()) {
         // TODO: just ignore this backup and wait next backup
-        LOG_WARNING("%s: all apps have been dropped, ignore this backup and retry it later",
-                    _backup_sig.c_str());
+        LOG_WARNING_F("{}: all apps have been dropped, ignore this backup and retry it later",
+                      _backup_sig);
         tasking::enqueue(LPC_DEFAULT_CALLBACK,
                          &_tracker,
                          [this]() {
@@ -939,11 +940,11 @@ void policy_context::gc_backup_info_unlocked(const backup_info &info_to_gc)
                             });
                         sync_remove_backup_info(info_to_gc, remove_local_backup_info_task);
                     } else { // ERR_FS_INTERNAL, ERR_TIMEOUT, ERR_DIR_NOT_EMPTY
-                        LOG_WARNING("%s: gc backup info, id(%" PRId64
-                                    ") failed, with err = %s, just try again",
-                                    _policy.policy_name.c_str(),
-                                    info_to_gc.backup_id,
-                                    resp.err.to_string());
+                        LOG_WARNING_F(
+                            "{}: gc backup info, id({}) failed, with err = {}, just try again",
+                            _policy.policy_name,
+                            info_to_gc.backup_id,
+                            resp.err);
                         gc_backup_info_unlocked(info_to_gc);
                     }
                 });
@@ -1078,7 +1079,7 @@ void backup_service::start_sync_policies()
             policy_kv.second->start();
         }
         if (_policy_states.empty()) {
-            LOG_WARNING(
+            LOG_WARNING_F(
                 "can't sync policies from remote storage, user should config some policies");
         }
         _in_initialize.store(false);
@@ -1493,9 +1494,9 @@ void backup_service::modify_backup_policy(configuration_modify_backup_policy_rpc
             const auto &app = _state->get_app(appid);
             // TODO: if app is dropped, how to process
             if (app == nullptr) {
-                LOG_WARNING("%s: add app to policy failed, because invalid app(%d), ignore it",
-                            cur_policy.policy_name.c_str(),
-                            appid);
+                LOG_WARNING_F("{}: add app to policy failed, because invalid app({}), ignore it",
+                              cur_policy.policy_name,
+                              appid);
             } else {
                 valid_app_ids_to_add.emplace_back(appid);
                 id_to_app_names.insert(std::make_pair(appid, app->app_name));
@@ -1545,8 +1546,7 @@ void backup_service::modify_backup_policy(configuration_modify_backup_policy_rpc
                 LOG_INFO_F("{}: remove app({}) to policy", cur_policy.policy_name, appid);
                 have_modify_policy = true;
             } else {
-                LOG_WARNING(
-                    "%s: invalid app_id(%d)", cur_policy.policy_name.c_str(), (int32_t)appid);
+                LOG_WARNING_F("{}: invalid app_id({})", cur_policy.policy_name, appid);
             }
         }
     }
@@ -1560,9 +1560,9 @@ void backup_service::modify_backup_policy(configuration_modify_backup_policy_rpc
             cur_policy.backup_interval_seconds = request.new_backup_interval_sec;
             have_modify_policy = true;
         } else {
-            LOG_WARNING("%s: invalid backup_interval_sec(%" PRId64 ")",
-                        cur_policy.policy_name.c_str(),
-                        request.new_backup_interval_sec);
+            LOG_WARNING_F("{}: invalid backup_interval_sec({})",
+                          cur_policy.policy_name,
+                          request.new_backup_interval_sec);
         }
     }
 
diff --git a/src/meta/meta_data.cpp b/src/meta/meta_data.cpp
index 344b2f98a..282bd5306 100644
--- a/src/meta/meta_data.cpp
+++ b/src/meta/meta_data.cpp
@@ -138,9 +138,7 @@ bool construct_replica(meta_view view, const gpid &pid, int max_replica_count)
 
     std::vector<dropped_replica> &drop_list = cc.dropped;
     if (drop_list.empty()) {
-        LOG_WARNING("construct for (%d.%d) failed, coz no replicas collected",
-                    pid.get_app_id(),
-                    pid.get_partition_index());
+        LOG_WARNING_F("construct for ({}) failed, coz no replicas collected", pid);
         return false;
     }
 
@@ -256,10 +254,9 @@ void proposal_actions::track_current_learner(const dsn::rpc_address &node, const
                 current_learner.last_prepared_decree > info.last_prepared_decree) {
 
                 // TODO: need to add a perf counter here
-                LOG_WARNING("%d.%d: learner(%s)'s progress step back, please trace this carefully",
-                            info.pid.get_app_id(),
-                            info.pid.get_partition_index(),
-                            node.to_string());
+                LOG_WARNING_F("{}: learner({})'s progress step back, please trace this carefully",
+                              info.pid,
+                              node);
             }
 
             // NOTICE: the flag may be abormal currently. it's balancer's duty to make use of the
diff --git a/src/meta/meta_server_failure_detector.cpp b/src/meta/meta_server_failure_detector.cpp
index 49e49b40c..ff24bda18 100644
--- a/src/meta/meta_server_failure_detector.cpp
+++ b/src/meta/meta_server_failure_detector.cpp
@@ -114,7 +114,7 @@ bool meta_server_failure_detector::get_leader(rpc_address *leader)
         if (err == dsn::ERR_OK && leader->from_string_ipv4(lock_owner.c_str())) {
             return (*leader) == dsn_primary_address();
         } else {
-            LOG_WARNING("query leader from cache got error(%s)", err.to_string());
+            LOG_WARNING_F("query leader from cache got error({})", err);
             leader->set_invalid();
             return false;
         }
@@ -226,9 +226,9 @@ bool meta_server_failure_detector::update_stability_stat(const fd::beacon_msg &b
             if (beacon.start_time - w.last_start_time_ms <
                 _fd_opts->stable_rs_min_running_seconds * 1000) {
                 w.unstable_restart_count++;
-                LOG_WARNING("%s encounter an unstable restart, total_count(%d)",
-                            beacon.from_addr.to_string(),
-                            w.unstable_restart_count);
+                LOG_WARNING_F("{} encounter an unstable restart, total_count({})",
+                              beacon.from_addr,
+                              w.unstable_restart_count);
             } else if (w.unstable_restart_count > 0) {
                 LOG_INFO_F("{} restart in {} ms after last restart, may recover ok, reset "
                            "it's unstable count({}) to 0",
@@ -240,8 +240,7 @@ bool meta_server_failure_detector::update_stability_stat(const fd::beacon_msg &b
 
             w.last_start_time_ms = beacon.start_time;
         } else {
-            LOG_WARNING("%s: possible encounter a staled message, ignore it",
-                        beacon.from_addr.to_string());
+            LOG_WARNING_F("{}: possible encounter a staled message, ignore it", beacon.from_addr);
         }
         return w.unstable_restart_count < _fd_opts->max_succssive_unstable_restart;
     }
@@ -256,7 +255,7 @@ void meta_server_failure_detector::on_ping(const fd::beacon_msg &beacon,
     ack.allowed = true;
 
     if (beacon.__isset.start_time && !update_stability_stat(beacon)) {
-        LOG_WARNING("%s is unstable, don't response to it's beacon", beacon.from_addr.to_string());
+        LOG_WARNING_F("{} is unstable, don't response to it's beacon", beacon.from_addr);
         return;
     }
 
diff --git a/src/meta/meta_service.cpp b/src/meta/meta_service.cpp
index 6c74375e5..0ed9bebd3 100644
--- a/src/meta/meta_service.cpp
+++ b/src/meta/meta_service.cpp
@@ -980,7 +980,7 @@ void meta_service::update_app_env(app_env_rpc env_rpc)
                          std::bind(&server_state::clear_app_envs, _state.get(), env_rpc));
         break;
     default: // app_env_operation::type::APP_ENV_OP_INVALID
-        LOG_WARNING("recv a invalid update app_env request, just ignore");
+        LOG_WARNING_F("recv a invalid update app_env request, just ignore");
         response.err = ERR_INVALID_PARAMETERS;
         response.hint_message =
             "recv a invalid update_app_env request with op = APP_ENV_OP_INVALID";
diff --git a/src/meta/meta_state_service_zookeeper.cpp b/src/meta/meta_state_service_zookeeper.cpp
index ad2ecb2c8..7a4c282b6 100644
--- a/src/meta/meta_state_service_zookeeper.cpp
+++ b/src/meta/meta_state_service_zookeeper.cpp
@@ -372,7 +372,7 @@ void meta_state_service_zookeeper::on_zoo_session_evt(ref_this _this, int zoo_st
 
     if (ZOO_CONNECTING_STATE == zoo_state) {
         // TODO: support the switch of zookeeper session
-        LOG_WARNING("the zk session is reconnecting");
+        LOG_WARNING_F("the zk session is reconnecting");
     } else if (_this->_first_call && ZOO_CONNECTED_STATE == zoo_state) {
         _this->_first_call = false;
         _this->_notifier.notify();
diff --git a/src/meta/partition_guardian.cpp b/src/meta/partition_guardian.cpp
index f586048bc..7be75a8f0 100644
--- a/src/meta/partition_guardian.cpp
+++ b/src/meta/partition_guardian.cpp
@@ -279,9 +279,9 @@ pc_status partition_guardian::on_missing_primary(meta_view &view, const dsn::gpi
     }
     // well, all replicas in this partition is dead
     else {
-        LOG_WARNING("%s enters DDD state, we are waiting for all replicas to come back, "
-                    "and select primary according to informations collected",
-                    gpid_name);
+        LOG_WARNING_F("{} enters DDD state, we are waiting for all replicas to come back, "
+                      "and select primary according to informations collected",
+                      gpid_name);
         // when considering how to handle the DDD state, we must keep in mind that our
         // shared/private-log data only write to OS-cache.
         // so the last removed replica can't act as primary directly.
@@ -321,9 +321,9 @@ pc_status partition_guardian::on_missing_primary(meta_view &view, const dsn::gpi
         }
 
         if (pc.last_drops.size() == 1) {
-            LOG_WARNING("%s: the only node(%s) is dead, waiting it to come back",
-                        gpid_name,
-                        pc.last_drops.back().to_string());
+            LOG_WARNING_F("{}: the only node({}) is dead, waiting it to come back",
+                          gpid_name,
+                          pc.last_drops.back());
             action.node = pc.last_drops.back();
         } else {
             std::vector<dsn::rpc_address> nodes(pc.last_drops.end() - 2, pc.last_drops.end());
@@ -341,7 +341,7 @@ pc_status partition_guardian::on_missing_primary(meta_view &view, const dsn::gpi
                     ready = false;
                     reason = "the last dropped node(" + nodes[i].to_std_string() +
                              ") haven't come back yet";
-                    LOG_WARNING("%s: don't select primary: %s", gpid_name, reason.c_str());
+                    LOG_WARNING_F("{}: don't select primary: {}", gpid_name, reason);
                 } else {
                     std::vector<dropped_replica>::iterator it = cc.find_from_dropped(nodes[i]);
                     if (it == cc.dropped.end() || it->ballot == invalid_ballot) {
@@ -360,7 +360,7 @@ pc_status partition_guardian::on_missing_primary(meta_view &view, const dsn::gpi
                             } else {
                                 reason += "replica info has not been collected from the node";
                             }
-                            LOG_WARNING("%s: don't select primary: %s", gpid_name, reason.c_str());
+                            LOG_WARNING_F("{}: don't select primary: {}", gpid_name, reason);
                         }
                     } else {
                         collected_info[i] = *it;
@@ -371,7 +371,7 @@ pc_status partition_guardian::on_missing_primary(meta_view &view, const dsn::gpi
             if (ready && collected_info[0].ballot == -1 && collected_info[1].ballot == -1) {
                 ready = false;
                 reason = "no replica info collected from the last two drops";
-                LOG_WARNING("%s: don't select primary: %s", gpid_name, reason.c_str());
+                LOG_WARNING_F("{}: don't select primary: {}", gpid_name, reason);
             }
 
             if (ready) {
@@ -412,12 +412,12 @@ pc_status partition_guardian::on_missing_primary(meta_view &view, const dsn::gpi
                                 larger_pd,
                                 pc.last_committed_decree,
                                 larger_cd);
-                        LOG_WARNING("%s: don't select primary: %s", gpid_name, reason.c_str());
+                        LOG_WARNING_F("{}: don't select primary: {}", gpid_name, reason);
                     }
                 } else {
                     reason = "for the last two drops, the node with larger ballot has smaller last "
                              "committed decree";
-                    LOG_WARNING("%s: don't select primary: %s", gpid_name, reason.c_str());
+                    LOG_WARNING_F("{}: don't select primary: {}", gpid_name, reason);
                 }
             }
         }
@@ -429,9 +429,9 @@ pc_status partition_guardian::on_missing_primary(meta_view &view, const dsn::gpi
             get_newly_partitions(*view.nodes, action.node)
                 ->newly_add_primary(gpid.get_app_id(), false);
         } else {
-            LOG_WARNING("%s: don't select any node for security reason, administrator can select "
-                        "a proper one by shell",
-                        gpid_name);
+            LOG_WARNING_F("{}: don't select any node for security reason, administrator can select "
+                          "a proper one by shell",
+                          gpid_name);
             _recent_choose_primary_fail_count->increment();
             ddd_partition_info pinfo;
             pinfo.config = pc;
diff --git a/src/meta/server_state.cpp b/src/meta/server_state.cpp
index ec11f038f..54530eecf 100644
--- a/src/meta/server_state.cpp
+++ b/src/meta/server_state.cpp
@@ -442,7 +442,7 @@ error_code server_state::initialize_default_apps()
 
             default_app.app_name = dsn_config_get_value_string(s, "app_name", "", "app name");
             if (default_app.app_name.length() == 0) {
-                LOG_WARNING("'[%s] app_name' not specified, ignore this section", s);
+                LOG_WARNING_F("'[{}] app_name' not specified, ignore this section", s);
                 continue;
             }
 
@@ -519,9 +519,8 @@ error_code server_state::sync_apps_to_remote_storage()
                              LPC_META_CALLBACK,
                              [&err, path](error_code ec) {
                                  if (ec != ERR_OK && ec != ERR_NODE_ALREADY_EXIST) {
-                                     LOG_WARNING("create app node failed, path(%s) reason(%s)",
-                                                 path.c_str(),
-                                                 ec.to_string());
+                                     LOG_WARNING_F(
+                                         "create app node failed, path({}) reason({})", path, ec);
                                      err = ec;
                                  } else {
                                      LOG_INFO_F("create app node {} ok", path);
@@ -898,12 +897,10 @@ void server_state::on_config_sync(configuration_query_by_node_rpc rpc)
                                        _meta_function_level_VALUES_TO_NAMES.find(level)->second);
                         } else {
                             response.gc_replicas.push_back(rep);
-                            LOG_WARNING(
-                                "notify node(%s) to gc replica(%d.%d) coz the app is dropped and "
-                                "expired",
-                                request.node.to_string(),
-                                rep.pid.get_app_id(),
-                                rep.pid.get_partition_index());
+                            LOG_WARNING_F("notify node({}) to gc replica({}) coz the app is "
+                                          "dropped and expired",
+                                          request.node,
+                                          rep.pid);
                         }
                     }
                 } else if (app->status == app_status::AS_AVAILABLE) {
@@ -918,10 +915,9 @@ void server_state::on_config_sync(configuration_query_by_node_rpc rpc)
                                        _meta_function_level_VALUES_TO_NAMES.find(level)->second);
                         } else {
                             response.gc_replicas.push_back(rep);
-                            LOG_WARNING("notify node(%s) to gc replica(%d.%d) coz it is useless",
-                                        request.node.to_string(),
-                                        rep.pid.get_app_id(),
-                                        rep.pid.get_partition_index());
+                            LOG_WARNING_F("notify node({}) to gc replica({}) coz it is useless",
+                                          request.node,
+                                          rep.pid);
                         }
                     }
                 }
@@ -1016,8 +1012,8 @@ void server_state::init_app_partition_node(std::shared_ptr<app_state> &app,
                 callback->enqueue();
             }
         } else if (ERR_TIMEOUT == ec) {
-            LOG_WARNING(
-                "create partition node failed, gpid(%d.%d), retry later", app->app_id, pidx);
+            LOG_WARNING_F(
+                "create partition node failed, gpid({}.{}), retry later", app->app_id, pidx);
             // TODO: add parameter of the retry time interval in config file
             tasking::enqueue(
                 LPC_META_STATE_HIGH,
@@ -1050,7 +1046,8 @@ void server_state::do_app_create(std::shared_ptr<app_state> &app)
                 init_app_partition_node(app, i, nullptr);
             }
         } else if (ERR_TIMEOUT == ec) {
-            LOG_WARNING("the storage service is not available currently, continue to create later");
+            LOG_WARNING_F(
+                "the storage service is not available currently, continue to create later");
             tasking::enqueue(LPC_META_STATE_HIGH,
                              tracker(),
                              std::bind(&server_state::do_app_create, this, app),
@@ -1770,11 +1767,10 @@ void server_state::drop_partition(std::shared_ptr<app_state> &app, int pidx)
     request.config.ballot++;
 
     if (config_status::pending_remote_sync == cc.stage) {
-        LOG_WARNING(
-            "gpid(%d.%d) is syncing another request with remote, cancel it due to partition is "
-            "dropped",
-            app->app_id,
-            pidx);
+        LOG_WARNING_F("gpid({}.{}) is syncing another request with remote, cancel it due to "
+                      "partition is dropped",
+                      app->app_id,
+                      pidx);
         cc.cancel_sync();
     }
     cc.stage = config_status::pending_remote_sync;
@@ -1796,15 +1792,14 @@ void server_state::downgrade_primary_to_inactive(std::shared_ptr<app_state> &app
                          "app({}) not in dropping state ({})",
                          app->get_logname(),
                          enum_to_string(app->status));
-            LOG_WARNING(
-                "stop downgrade primary as the partitions(%d.%d) is dropping", app->app_id, pidx);
+            LOG_WARNING_F(
+                "stop downgrade primary as the partitions({}.{}) is dropping", app->app_id, pidx);
             return;
         } else {
-            LOG_WARNING("gpid(%d.%d) is syncing another request with remote, cancel it due to the "
-                        "primary(%s) is down",
-                        pc.pid.get_app_id(),
-                        pc.pid.get_partition_index(),
-                        pc.primary.to_string());
+            LOG_WARNING_F("gpid({}) is syncing another request with remote, cancel it due to the "
+                          "primary({}) is down",
+                          pc.pid,
+                          pc.primary);
             cc.cancel_sync();
         }
     }
@@ -1882,12 +1877,11 @@ void server_state::downgrade_stateless_nodes(std::shared_ptr<app_state> &app,
     pc.last_drops.pop_back();
 
     if (config_status::pending_remote_sync == cc.stage) {
-        LOG_WARNING("gpid(%d.%d) is syncing another request with remote, cancel it due to meta is "
-                    "removing host(%s) worker(%s)",
-                    pc.pid.get_app_id(),
-                    pc.pid.get_partition_index(),
-                    req->host_node.to_string(),
-                    req->node.to_string());
+        LOG_WARNING_F("gpid({}) is syncing another request with remote, cancel it due to meta is "
+                      "removing host({}) worker({})",
+                      pc.pid,
+                      req->host_node,
+                      req->node);
         cc.cancel_sync();
     }
     cc.stage = config_status::pending_remote_sync;
@@ -2114,10 +2108,10 @@ server_state::construct_apps(const std::vector<query_app_info_response> &query_a
             app->app_name = app->app_name + "__" + boost::lexical_cast<std::string>(app_id);
         }
         if (app->app_name != old_name) {
-            LOG_WARNING("app(%d)'s old name(%s) is conflict with others, rename it to (%s)",
-                        app_id,
-                        old_name.c_str(),
-                        app->app_name.c_str());
+            LOG_WARNING_F("app({})'s old name({}) is conflict with others, rename it to ({})",
+                          app_id,
+                          old_name,
+                          app->app_name);
             std::ostringstream oss;
             oss << "WARNING: app(" << app_id << ")'s old name(" << old_name
                 << ") is conflict with others, rename it to (" << app->app_name << ")" << std::endl;
@@ -2185,9 +2179,9 @@ error_code server_state::construct_partitions(
                     }
                     succeed_count++;
                 } else {
-                    LOG_WARNING("construct partition(%d.%d) failed",
-                                app->app_id,
-                                pc.pid.get_partition_index());
+                    LOG_WARNING_F("construct partition({}.{}) failed",
+                                  app->app_id,
+                                  pc.pid.get_partition_index());
                     std::ostringstream oss;
                     if (skip_lost_partitions) {
                         oss << "WARNING: partition(" << app->app_id << "."
@@ -2283,15 +2277,15 @@ server_state::sync_apps_from_replica_nodes(const std::vector<dsn::rpc_address> &
     for (int i = 0; i < n_replicas; ++i) {
         error_code err = dsn::ERR_OK;
         if (query_app_errors[i] != dsn::ERR_OK) {
-            LOG_WARNING("query app info from node(%s) failed, reason: %s",
-                        replica_nodes[i].to_string(),
-                        query_app_errors[i].to_string());
+            LOG_WARNING_F("query app info from node({}) failed, reason: {}",
+                          replica_nodes[i],
+                          query_app_errors[i]);
             err = query_app_errors[i];
         }
         if (query_replica_errors[i] != dsn::ERR_OK) {
-            LOG_WARNING("query replica info from node(%s) failed, reason: %s",
-                        replica_nodes[i].to_string(),
-                        query_replica_errors[i].to_string());
+            LOG_WARNING_F("query replica info from node({}) failed, reason: {}",
+                          replica_nodes[i],
+                          query_replica_errors[i]);
             err = query_replica_errors[i];
         }
         if (err != dsn::ERR_OK) {
@@ -2670,9 +2664,9 @@ void server_state::do_update_app_info(const std::string &app_path,
         if (ec == ERR_OK) {
             user_cb(ec);
         } else if (ec == ERR_TIMEOUT) {
-            LOG_WARNING(
-                "update app_info(app = %s) to remote storage timeout, continue to update later",
-                info.app_name.c_str());
+            LOG_WARNING_F(
+                "update app_info(app = {}) to remote storage timeout, continue to update later",
+                info.app_name);
             tasking::enqueue(
                 LPC_META_STATE_NORMAL,
                 tracker(),
@@ -2696,7 +2690,7 @@ void server_state::set_app_envs(const app_env_rpc &env_rpc)
     if (!request.__isset.keys || !request.__isset.values ||
         request.keys.size() != request.values.size() || request.keys.size() <= 0) {
         env_rpc.response().err = ERR_INVALID_PARAMETERS;
-        LOG_WARNING("set app envs failed with invalid request");
+        LOG_WARNING_F("set app envs failed with invalid request");
         return;
     }
     const std::vector<std::string> &keys = request.keys;
@@ -2726,7 +2720,7 @@ void server_state::set_app_envs(const app_env_rpc &env_rpc)
         zauto_read_lock l(_lock);
         std::shared_ptr<app_state> app = get_app(app_name);
         if (app == nullptr) {
-            LOG_WARNING("set app envs failed with invalid app_name(%s)", app_name.c_str());
+            LOG_WARNING_F("set app envs failed with invalid app_name({})", app_name);
             env_rpc.response().err = ERR_INVALID_PARAMETERS;
             env_rpc.response().hint_message = "invalid app name";
             return;
@@ -2757,7 +2751,7 @@ void server_state::del_app_envs(const app_env_rpc &env_rpc)
     const configuration_update_app_env_request &request = env_rpc.request();
     if (!request.__isset.keys || request.keys.size() <= 0) {
         env_rpc.response().err = ERR_INVALID_PARAMETERS;
-        LOG_WARNING("del app envs failed with invalid request");
+        LOG_WARNING_F("del app envs failed with invalid request");
         return;
     }
     const std::vector<std::string> &keys = request.keys;
@@ -2780,7 +2774,7 @@ void server_state::del_app_envs(const app_env_rpc &env_rpc)
         zauto_read_lock l(_lock);
         std::shared_ptr<app_state> app = get_app(app_name);
         if (app == nullptr) {
-            LOG_WARNING("del app envs failed with invalid app_name(%s)", app_name.c_str());
+            LOG_WARNING_F("del app envs failed with invalid app_name({})", app_name);
             env_rpc.response().err = ERR_INVALID_PARAMETERS;
             env_rpc.response().hint_message = "invalid app name";
             return;
@@ -2827,7 +2821,7 @@ void server_state::clear_app_envs(const app_env_rpc &env_rpc)
     const configuration_update_app_env_request &request = env_rpc.request();
     if (!request.__isset.clear_prefix) {
         env_rpc.response().err = ERR_INVALID_PARAMETERS;
-        LOG_WARNING("clear app envs failed with invalid request");
+        LOG_WARNING_F("clear app envs failed with invalid request");
         return;
     }
 
@@ -2844,7 +2838,7 @@ void server_state::clear_app_envs(const app_env_rpc &env_rpc)
         zauto_read_lock l(_lock);
         std::shared_ptr<app_state> app = get_app(app_name);
         if (app == nullptr) {
-            LOG_WARNING("clear app envs failed with invalid app_name(%s)", app_name.c_str());
+            LOG_WARNING_F("clear app envs failed with invalid app_name({})", app_name);
             env_rpc.response().err = ERR_INVALID_PARAMETERS;
             env_rpc.response().hint_message = "invalid app name";
             return;

