This is an automated email from the ASF dual-hosted git repository.

zhaoliwei pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-pegasus.git


The following commit(s) were added to refs/heads/master by this push:
     new 8d3b42c43 refactor(log): use LOG_INFO_F instead of LOG_INFO (2/3) (#1310)
8d3b42c43 is described below

commit 8d3b42c43952b2db0a498b36ff3e7397787cb125
Author: Yingchun Lai <[email protected]>
AuthorDate: Fri Jan 13 19:03:52 2023 +0800

    refactor(log): use LOG_INFO_F instead of LOG_INFO (2/3) (#1310)
---
 src/meta/app_balance_policy.cpp                 |   4 +-
 src/meta/greedy_load_balancer.cpp               |   9 +-
 src/meta/load_balance_policy.cpp                |  17 ++-
 src/meta/meta_backup_service.cpp                | 158 ++++++++++-----------
 src/meta/meta_backup_service.h                  |   4 +
 src/meta/meta_data.cpp                          |  37 +++--
 src/meta/meta_server_failure_detector.cpp       |  60 ++++----
 src/meta/meta_service.cpp                       |  46 +++----
 src/meta/meta_service.h                         |   8 +-
 src/meta/meta_state_service_zookeeper.cpp       |   2 +-
 src/meta/partition_guardian.cpp                 | 174 ++++++++++++------------
 src/meta/server_state.cpp                       | 128 +++++++++--------
 src/meta/server_state_restore.cpp               |   9 +-
 src/meta/test/backup_test.cpp                   |   8 +-
 src/meta/test/meta_state/meta_state_service.cpp |   2 +-
 src/meta/test/update_configuration_test.cpp     |   2 +-
 16 files changed, 323 insertions(+), 345 deletions(-)
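
The change is mechanical: printf-style LOG_INFO calls become fmt-style LOG_INFO_F calls, so specifiers such as %s, %d and %" PRId64 " turn into {} placeholders, and explicit .c_str()/.to_string() conversions are dropped because the arguments are rendered by their fmt formatters. A minimal sketch of the pattern (illustrative only; the argument names are taken from one of the hunks below):

    // before: printf-style formatting, arguments converted by hand
    LOG_INFO("%s: finish backup for app(%d)", _backup_sig.c_str(), app_id);

    // after: fmt-style formatting with {} placeholders, no manual conversion
    LOG_INFO_F("{}: finish backup for app({})", _backup_sig, app_id);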

diff --git a/src/meta/app_balance_policy.cpp b/src/meta/app_balance_policy.cpp
index 3e61ad168..07acbc8bc 100644
--- a/src/meta/app_balance_policy.cpp
+++ b/src/meta/app_balance_policy.cpp
@@ -96,11 +96,11 @@ void app_balance_policy::balance(bool checker, const meta_view *global_view, mig
 bool app_balance_policy::need_balance_secondaries(bool balance_checker)
 {
     if (!balance_checker && !_migration_result->empty()) {
-        LOG_INFO("stop to do secondary balance coz we already have actions to do");
+        LOG_INFO_F("stop to do secondary balance coz we already have actions to do");
         return false;
     }
     if (_only_primary_balancer) {
-        LOG_INFO("stop to do secondary balancer coz it is not allowed");
+        LOG_INFO_F("stop to do secondary balancer coz it is not allowed");
         return false;
     }
     return true;
diff --git a/src/meta/greedy_load_balancer.cpp b/src/meta/greedy_load_balancer.cpp
index f02e074dc..3487feccf 100644
--- a/src/meta/greedy_load_balancer.cpp
+++ b/src/meta/greedy_load_balancer.cpp
@@ -154,10 +154,7 @@ bool greedy_load_balancer::all_replica_infos_collected(const node_state &ns)
     return ns.for_each_partition([this, n](const dsn::gpid &pid) {
         config_context &cc = *get_config_context(*(t_global_view->apps), pid);
         if (cc.find_from_serving(n) == cc.serving.end()) {
-            LOG_INFO("meta server hasn't collected gpid(%d.%d)'s info of %s",
-                     pid.get_app_id(),
-                     pid.get_partition_index(),
-                     n.to_string());
+            LOG_INFO_F("meta server hasn't collected gpid({})'s info of {}", pid, n);
             return false;
         }
         return true;
@@ -189,7 +186,7 @@ void greedy_load_balancer::greedy_balancer(const bool balance_checker)
 
 bool greedy_load_balancer::balance(meta_view view, migration_list &list)
 {
-    LOG_INFO("balancer round");
+    LOG_INFO_F("balancer round");
     list.clear();
 
     t_alive_nodes = view.nodes->size();
@@ -203,7 +200,7 @@ bool greedy_load_balancer::balance(meta_view view, migration_list &list)
 
 bool greedy_load_balancer::check(meta_view view, migration_list &list)
 {
-    LOG_INFO("balance checker round");
+    LOG_INFO_F("balance checker round");
     list.clear();
 
     t_alive_nodes = view.nodes->size();
diff --git a/src/meta/load_balance_policy.cpp b/src/meta/load_balance_policy.cpp
index 5738ce4ca..bb43d44c7 100644
--- a/src/meta/load_balance_policy.cpp
+++ b/src/meta/load_balance_policy.cpp
@@ -155,13 +155,12 @@ generate_balancer_request(const app_mapper &apps,
     default:
         CHECK(false, "");
     }
-    LOG_INFO("generate balancer: %d.%d %s from %s of disk_tag(%s) to %s",
-             pc.pid.get_app_id(),
-             pc.pid.get_partition_index(),
-             ans.c_str(),
-             from.to_string(),
-             get_disk_tag(apps, from, pc.pid).c_str(),
-             to.to_string());
+    LOG_INFO_F("generate balancer: {} {} from {} of disk_tag({}) to {}",
+               pc.pid,
+               ans,
+               from,
+               get_disk_tag(apps, from, pc.pid),
+               to);
     return std::make_shared<configuration_balancer_request>(std::move(result));
 }
 
@@ -370,8 +369,8 @@ bool load_balance_policy::execute_balance(
         if (!balance_checker) {
             if (!_migration_result->empty()) {
                 if (balance_in_turn) {
-                    LOG_INFO("stop to handle more apps after we found some actions for %s",
-                             app->get_logname());
+                    LOG_INFO_F("stop to handle more apps after we found some actions for {}",
+                               app->get_logname());
                     return false;
                 }
             }
diff --git a/src/meta/meta_backup_service.cpp b/src/meta/meta_backup_service.cpp
index 1fb6faa7f..fa2db91a1 100644
--- a/src/meta/meta_backup_service.cpp
+++ b/src/meta/meta_backup_service.cpp
@@ -121,9 +121,9 @@ void policy_context::start_backup_app_meta_unlocked(int32_t app_id)
                 CHECK_EQ(resp.written_size, buffer.length());
                 {
                     zauto_lock l(_lock);
-                    LOG_INFO("%s: successfully backup app metadata to %s",
-                             _policy.policy_name.c_str(),
-                             remote_file->file_name().c_str());
+                    LOG_INFO_F("{}: successfully backup app metadata to {}",
+                               _policy.policy_name,
+                               remote_file->file_name());
                     start_backup_app_partitions_unlocked(app_id);
                 }
             } else if (resp.err == ERR_FS_INTERNAL) {
@@ -222,7 +222,7 @@ void policy_context::write_backup_app_finish_flag_unlocked(int32_t app_id,
     if (remote_file->get_size() > 0) {
         // we only focus whether app_backup_status file is exist, so ignore app_backup_status file's
         // context
-        LOG_INFO("app(%d) already write finish-flag on block service", app_id);
+        LOG_INFO_F("app({}) already write finish-flag on block service", app_id);
         if (write_callback != nullptr) {
             write_callback->enqueue();
         }
@@ -237,8 +237,8 @@ void policy_context::write_backup_app_finish_flag_unlocked(int32_t app_id,
         [this, app_id, write_callback, remote_file](
             const dist::block_service::write_response &resp) {
             if (resp.err == ERR_OK) {
-                LOG_INFO("app(%d) finish backup and write finish-flag on block service succeed",
-                         app_id);
+                LOG_INFO_F("app({}) finish backup and write finish-flag on block service succeed",
+                           app_id);
                 if (write_callback != nullptr) {
                     write_callback->enqueue();
                 }
@@ -267,12 +267,12 @@ void policy_context::write_backup_app_finish_flag_unlocked(int32_t app_id,
 
 void policy_context::finish_backup_app_unlocked(int32_t app_id)
 {
-    LOG_INFO("%s: finish backup for app(%d), progress(%d)",
-             _backup_sig.c_str(),
-             app_id,
-             _progress.unfinished_apps);
+    LOG_INFO_F("{}: finish backup for app({}), progress({})",
+               _backup_sig,
+               app_id,
+               _progress.unfinished_apps);
     if (--_progress.unfinished_apps == 0) {
-        LOG_INFO("%s: finish current backup for all apps", _backup_sig.c_str());
+        LOG_INFO_F("{}: finish current backup for all apps", _backup_sig);
         _cur_backup.end_time_ms = dsn_now_ms();
 
         task_ptr write_backup_info_callback =
@@ -287,8 +287,7 @@ void policy_context::finish_backup_app_unlocked(int32_t app_id)
                               _cur_backup.backup_id);
                         _cur_backup.start_time_ms = 0;
                         _cur_backup.end_time_ms = 0;
-                        LOG_INFO("%s: finish an old backup, try to start a new one",
-                                 _backup_sig.c_str());
+                        LOG_INFO_F("{}: finish an old backup, try to start a new one", _backup_sig);
                         issue_new_backup_unlocked();
                     });
                 sync_backup_to_remote_storage_unlocked(_cur_backup, start_a_new_backup, false);
@@ -345,8 +344,8 @@ void policy_context::write_backup_info_unlocked(const backup_info &b_info,
         [this, b_info, write_callback, remote_file](
             const dist::block_service::write_response &resp) {
             if (resp.err == ERR_OK) {
-                LOG_INFO("policy(%s) write backup_info to cold backup media succeed",
-                         _policy.policy_name.c_str());
+                LOG_INFO_F("policy({}) write backup_info to cold backup media succeed",
+                           _policy.policy_name);
                 if (write_callback != nullptr) {
                     write_callback->enqueue();
                 }
@@ -614,11 +613,11 @@ void policy_context::sync_backup_to_remote_storage_unlocked(const backup_info &b
 
     auto callback = [this, b_info, sync_callback, create_new_node](dsn::error_code err) {
         if (dsn::ERR_OK == err || (create_new_node && ERR_NODE_ALREADY_EXIST == err)) {
-            LOG_INFO("%s: synced backup_info(%" PRId64 ") to remote storage successfully,"
-                     " start real backup work, new_node_create(%s)",
-                     _policy.policy_name.c_str(),
-                     b_info.backup_id,
-                     create_new_node ? "true" : "false");
+            LOG_INFO_F("{}: synced backup_info({}) to remote storage successfully, "
+                       "start real backup work, new_node_create({})",
+                       _policy.policy_name,
+                       b_info.backup_id,
+                       create_new_node ? "true" : "false");
             if (sync_callback != nullptr) {
                 sync_callback->enqueue();
             } else {
@@ -744,8 +743,7 @@ void policy_context::issue_new_backup_unlocked()
 {
     // before issue new backup, we check whether the policy is dropped
     if (_policy.is_disable) {
-        LOG_INFO("%s: policy is disabled, just ignore backup, try it later",
-                 _policy.policy_name.c_str());
+        LOG_INFO_F("{}: policy is disabled, just ignore backup, try it later", _policy.policy_name);
         tasking::enqueue(LPC_DEFAULT_CALLBACK,
                          &_tracker,
                          [this]() {
@@ -766,9 +764,9 @@ void policy_context::issue_new_backup_unlocked()
                          },
                          0,
                          _backup_service->backup_option().issue_backup_interval_ms);
-        LOG_INFO("%s: start issue new backup %" PRId64 "ms later",
-                 _policy.policy_name.c_str(),
-                 _backup_service->backup_option().issue_backup_interval_ms.count());
+        LOG_INFO_F("{}: start issue new backup {}ms later",
+                   _policy.policy_name,
+                   _backup_service->backup_option().issue_backup_interval_ms.count());
         return;
     }
 
@@ -814,17 +812,17 @@ void policy_context::start()
         "policy recent backup duration time");
 
     issue_gc_backup_info_task_unlocked();
-    LOG_INFO("%s: start gc backup info task succeed", _policy.policy_name.c_str());
+    LOG_INFO_F("{}: start gc backup info task succeed", _policy.policy_name);
 }
 
 void policy_context::add_backup_history(const backup_info &info)
 {
     zauto_lock l(_lock);
     if (info.end_time_ms <= 0) {
-        LOG_INFO("%s: encounter an unfished backup_info(%lld), start_time(%lld), continue it later",
-                 _policy.policy_name.c_str(),
-                 info.backup_id,
-                 info.start_time_ms);
+        LOG_INFO_F("{}: encounter an unfished backup_info({}), start_time({}), continue it later",
+                   _policy.policy_name,
+                   info.backup_id,
+                   info.start_time_ms);
 
         CHECK_EQ_MSG(_cur_backup.start_time_ms,
                      0,
@@ -842,11 +840,11 @@ void policy_context::add_backup_history(const backup_info &info)
         _backup_sig =
             _policy.policy_name + "@" + 
boost::lexical_cast<std::string>(_cur_backup.backup_id);
     } else {
-        LOG_INFO("%s: add backup history, id(%lld), start_time(%lld), 
endtime(%lld)",
-                 _policy.policy_name.c_str(),
-                 info.backup_id,
-                 info.start_time_ms,
-                 info.end_time_ms);
+        LOG_INFO_F("{}: add backup history, id({}), start_time({}), endtime({})",
+                   _policy.policy_name,
+                   info.backup_id,
+                   info.start_time_ms,
+                   info.end_time_ms);
         CHECK(_cur_backup.end_time_ms == 0 || info.backup_id < _cur_backup.backup_id,
               "{}: backup_id({}) in history larger than current({})",
               _policy.policy_name,
@@ -915,11 +913,11 @@ void policy_context::gc_backup_info_unlocked(const backup_info &info_to_gc)
     ::dsn::utils::time_ms_to_date_time(
         static_cast<uint64_t>(info_to_gc.start_time_ms), start_time, 30);
     ::dsn::utils::time_ms_to_date_time(static_cast<uint64_t>(info_to_gc.end_time_ms), end_time, 30);
-    LOG_INFO("%s: start to gc backup info, backup_id(%" PRId64 "), start_time(%s), end_time(%s)",
-             _policy.policy_name.c_str(),
-             info_to_gc.backup_id,
-             start_time,
-             end_time);
+    LOG_INFO_F("{}: start to gc backup info, backup_id({}), start_time({}), end_time({})",
+               _policy.policy_name,
+               info_to_gc.backup_id,
+               start_time,
+               end_time);
 
     dsn::task_ptr sync_callback =
         ::dsn::tasking::create_task(LPC_DEFAULT_CALLBACK, &_tracker, [this, info_to_gc]() {
@@ -958,9 +956,7 @@ void policy_context::issue_gc_backup_info_task_unlocked()
     if (_backup_history.size() > _policy.backup_history_count_to_keep) {
         backup_info &info = _backup_history.begin()->second;
         info.info_status = backup_info_status::type::DELETING;
-        LOG_INFO("%s: start to gc backup info with id(%" PRId64 ")",
-                 _policy.policy_name.c_str(),
-                 info.backup_id);
+        LOG_INFO_F("{}: start to gc backup info with id({})", _policy.policy_name, info.backup_id);
 
         tasking::create_task(LPC_DEFAULT_CALLBACK, &_tracker, [this, info]() {
             gc_backup_info_unlocked(info);
@@ -998,11 +994,9 @@ void policy_context::sync_remove_backup_info(const backup_info &info, dsn::task_
         _backup_service->get_backup_path(_policy.policy_name, info.backup_id);
     auto callback = [this, info, sync_callback](dsn::error_code err) {
         if (err == dsn::ERR_OK || err == dsn::ERR_OBJECT_NOT_FOUND) {
-            LOG_INFO(
-                "%s: sync remove backup_info on remote storage successfully, backup_id(%" PRId64
-                ")",
-                _policy.policy_name.c_str(),
-                info.backup_id);
+            LOG_INFO_F("{}: sync remove backup_info on remote storage successfully, backup_id({})",
+                       _policy.policy_name,
+                       info.backup_id);
             if (sync_callback != nullptr) {
                 sync_callback->enqueue();
             }
@@ -1054,9 +1048,8 @@ void backup_service::start_create_policy_meta_root(dsn::task_ptr callback)
     _meta_svc->get_remote_storage()->create_node(
         _policy_meta_root, LPC_DEFAULT_CALLBACK, [this, callback](dsn::error_code err) {
             if (err == dsn::ERR_OK || err == ERR_NODE_ALREADY_EXIST) {
-                LOG_INFO("create policy meta root(%s) succeed, with err(%s)",
-                         _policy_meta_root.c_str(),
-                         err.to_string());
+                LOG_INFO_F(
+                    "create policy meta root({}) succeed, with err({})", _policy_meta_root, err);
                 callback->enqueue();
             } else if (err == dsn::ERR_TIMEOUT) {
                 LOG_ERROR("create policy meta root(%s) timeout, try it later",
@@ -1077,11 +1070,11 @@ void backup_service::start_sync_policies()
 {
     // TODO: make sync_policies_from_remote_storage function to async
     //       sync-api will leader to deadlock when the threadnum = 1 in default threadpool
-    LOG_INFO("backup service start to sync policies from remote storage");
+    LOG_INFO_F("backup service start to sync policies from remote storage");
     dsn::error_code err = sync_policies_from_remote_storage();
     if (err == dsn::ERR_OK) {
         for (auto &policy_kv : _policy_states) {
-            LOG_INFO("policy(%s) start to backup", policy_kv.first.c_str());
+            LOG_INFO_F("policy({}) start to backup", policy_kv.first);
             policy_kv.second->start();
         }
         if (_policy_states.empty()) {
@@ -1131,8 +1124,7 @@ error_code backup_service::sync_policies_from_remote_storage()
                 ptr->add_backup_history(tbackup_info);
             } else {
                 err = ec;
-                LOG_INFO("init backup_info from remote storage fail, error_code = %s",
-                         ec.to_string());
+                LOG_INFO_F("init backup_info from remote storage fail, error_code = {}", ec);
             }
         };
         std::string backup_info_root = get_policy_path(policy_name);
@@ -1147,9 +1139,9 @@ error_code backup_service::sync_policies_from_remote_storage()
                         for (const auto &b_id : children) {
                             int64_t backup_id = boost::lexical_cast<int64_t>(b_id);
                             std::string backup_path = get_backup_path(policy_name, backup_id);
-                            LOG_INFO("start to acquire backup_info(%" PRId64 ") of policy(%s)",
-                                     backup_id,
-                                     policy_name.c_str());
+                            LOG_INFO_F("start to acquire backup_info({}) of policy({})",
+                                       backup_id,
+                                       policy_name);
                             _meta_svc->get_remote_storage()->get_data(
                                 backup_path,
                                 TASK_CODE_EXEC_INLINED,
@@ -1158,8 +1150,8 @@ error_code backup_service::sync_policies_from_remote_storage()
                         }
                     } else // have not backup
                     {
-                        LOG_INFO("policy has not started a backup process, policy_name = %s",
-                                 policy_name.c_str());
+                        LOG_INFO_F("policy has not started a backup process, policy_name = {}",
+                                   policy_name);
                     }
                 } else {
                     err = ec;
@@ -1176,7 +1168,7 @@ error_code backup_service::sync_policies_from_remote_storage()
     auto init_one_policy =
         [this, &err, &tracker, &init_backup_info](const std::string &policy_name) {
             auto policy_path = get_policy_path(policy_name);
-            LOG_INFO("start to acquire the context of policy(%s)", policy_name.c_str());
+            LOG_INFO_F("start to acquire the context of policy({})", policy_name);
             _meta_svc->get_remote_storage()->get_data(
                 policy_path,
                 LPC_DEFAULT_CALLBACK, // TASK_CODE_EXEC_INLINED,
@@ -1326,7 +1318,7 @@ void backup_service::do_add_policy(dsn::message_ex *req,
                 configuration_add_backup_policy_response resp;
                 resp.hint_message = hint_msg;
                 resp.err = ERR_OK;
-                LOG_INFO("add backup policy succeed, policy_name = %s", policy_name.c_str());
+                LOG_INFO_F("add backup policy succeed, policy_name = {}", policy_name);
 
                 _meta_svc->reply_data(req, resp);
                 req->release_ref();
@@ -1366,8 +1358,8 @@ void backup_service::do_update_policy_to_remote_storage(
             if (err == ERR_OK) {
                 configuration_modify_backup_policy_response resp;
                 resp.err = ERR_OK;
-                LOG_INFO("update backup policy to remote storage succeed, policy_name = %s",
-                         p.policy_name.c_str());
+                LOG_INFO_F("update backup policy to remote storage succeed, policy_name = {}",
+                           p.policy_name);
                 p_context_ptr->set_policy(p);
             } else if (err == ERR_TIMEOUT) {
                 LOG_ERROR("update backup policy to remote storage failed, 
policy_name = %s, retry "
@@ -1515,23 +1507,23 @@ void 
backup_service::modify_backup_policy(configuration_modify_backup_policy_rpc
     if (request.__isset.is_disable) {
         if (request.is_disable) {
             if (is_under_backup) {
-                LOG_INFO("%s: policy is under backuping, not allow to disable",
-                         cur_policy.policy_name.c_str());
+                LOG_INFO_F("{}: policy is under backuping, not allow to disable",
+                           cur_policy.policy_name);
                 response.err = ERR_BUSY;
             } else if (!cur_policy.is_disable) {
-                LOG_INFO("%s: policy is marked to disable", cur_policy.policy_name.c_str());
+                LOG_INFO_F("{}: policy is marked to disable", cur_policy.policy_name);
                 cur_policy.is_disable = true;
                 have_modify_policy = true;
             } else { // cur_policy.is_disable = true
-                LOG_INFO("%s: policy is already disabled", cur_policy.policy_name.c_str());
+                LOG_INFO_F("{}: policy is already disabled", cur_policy.policy_name);
             }
         } else {
             if (cur_policy.is_disable) {
                 cur_policy.is_disable = false;
-                LOG_INFO("%s: policy is marked to enable", cur_policy.policy_name.c_str());
+                LOG_INFO_F("{}: policy is marked to enable", cur_policy.policy_name);
                 have_modify_policy = true;
             } else {
-                LOG_INFO("%s: policy is already enabled", cur_policy.policy_name.c_str());
+                LOG_INFO_F("{}: policy is already enabled", cur_policy.policy_name);
                 response.err = ERR_OK;
                 response.hint_message = std::string("policy is already enabled");
             }
@@ -1550,7 +1542,7 @@ void backup_service::modify_backup_policy(configuration_modify_backup_policy_rpc
         for (const auto &appid : request.removal_appids) {
             if (appid > 0) {
                 cur_policy.app_ids.erase(appid);
-                LOG_INFO("%s: remove app(%d) to policy", cur_policy.policy_name.c_str(), appid);
+                LOG_INFO_F("{}: remove app({}) to policy", cur_policy.policy_name, appid);
                 have_modify_policy = true;
             } else {
                 LOG_WARNING(
@@ -1561,10 +1553,10 @@ void backup_service::modify_backup_policy(configuration_modify_backup_policy_rpc
 
     if (request.__isset.new_backup_interval_sec) {
         if (request.new_backup_interval_sec > 0) {
-            LOG_INFO("%s: policy will change backup interval from %" PRId64 "(s) to %" PRId64 "(s)",
-                     cur_policy.policy_name.c_str(),
-                     cur_policy.backup_interval_seconds,
-                     request.new_backup_interval_sec);
+            LOG_INFO_F("{}: policy will change backup interval from {}s to {}s",
+                       cur_policy.policy_name,
+                       cur_policy.backup_interval_seconds,
+                       request.new_backup_interval_sec);
             cur_policy.backup_interval_seconds = request.new_backup_interval_sec;
             have_modify_policy = true;
         } else {
@@ -1576,10 +1568,10 @@ void backup_service::modify_backup_policy(configuration_modify_backup_policy_rpc
 
     if (request.__isset.backup_history_count_to_keep) {
         if (request.backup_history_count_to_keep > 0) {
-            LOG_INFO("%s: policy will change backup_history_count_to_keep from (%d) to (%d)",
-                     cur_policy.policy_name.c_str(),
-                     cur_policy.backup_history_count_to_keep,
-                     request.backup_history_count_to_keep);
+            LOG_INFO_F("{}: policy will change backup_history_count_to_keep from {} to {}",
+                       cur_policy.policy_name,
+                       cur_policy.backup_history_count_to_keep,
+                       request.backup_history_count_to_keep);
             cur_policy.backup_history_count_to_keep = request.backup_history_count_to_keep;
             have_modify_policy = true;
         }
@@ -1588,10 +1580,10 @@ void backup_service::modify_backup_policy(configuration_modify_backup_policy_rpc
     if (request.__isset.start_time) {
         backup_start_time t_start_time;
         if (t_start_time.parse_from(request.start_time)) {
-            LOG_INFO("%s: policy change start_time from (%s) to (%s)",
-                     cur_policy.policy_name.c_str(),
-                     cur_policy.start_time.to_string().c_str(),
-                     t_start_time.to_string().c_str());
+            LOG_INFO_F("{}: policy change start_time from {} to {}",
+                       cur_policy.policy_name,
+                       cur_policy.start_time,
+                       t_start_time);
             cur_policy.start_time = t_start_time;
             have_modify_policy = true;
         }
diff --git a/src/meta/meta_backup_service.h b/src/meta/meta_backup_service.h
index bf4b98e19..a1a2cfb81 100644
--- a/src/meta/meta_backup_service.h
+++ b/src/meta/meta_backup_service.h
@@ -92,6 +92,10 @@ struct backup_start_time
            << std::setfill('0') << std::to_string(minute);
         return ss.str();
     }
+    friend std::ostream &operator<<(std::ostream &os, const backup_start_time &t)
+    {
+        return os << t.to_string();
+    }
     // NOTICE: this function will modify hour and minute, if time is invalid, this func will set
     // hour = 24, minute = 0
     bool parse_from(const std::string &time)
diff --git a/src/meta/meta_data.cpp b/src/meta/meta_data.cpp
index 3f49c34fb..344b2f98a 100644
--- a/src/meta/meta_data.cpp
+++ b/src/meta/meta_data.cpp
@@ -155,14 +155,13 @@ bool construct_replica(meta_view view, const gpid &pid, int max_replica_count)
     pc.partition_flags = 0;
     pc.max_replica_count = max_replica_count;
 
-    LOG_INFO("construct for (%d.%d), select %s as primary, ballot(%" PRId64
-             "), committed_decree(%" PRId64 "), prepare_decree(%" PRId64 ")",
-             pid.get_app_id(),
-             pid.get_partition_index(),
-             server.node.to_string(),
-             server.ballot,
-             server.last_committed_decree,
-             server.last_prepared_decree);
+    LOG_INFO_F("construct for ({}), select {} as primary, ballot({}), committed_decree({}), "
+               "prepare_decree({})",
+               pid,
+               server.node,
+               server.ballot,
+               server.last_committed_decree,
+               server.last_prepared_decree);
 
     drop_list.pop_back();
 
@@ -178,14 +177,13 @@ bool construct_replica(meta_view view, const gpid &pid, int max_replica_count)
             break;
         // similar to cc.drop_list, pc.last_drop is also a stack structure
         pc.last_drops.insert(pc.last_drops.begin(), iter->node);
-        LOG_INFO("construct for (%d.%d), select %s into last_drops, ballot(%" PRId64
-                 "), committed_decree(%" PRId64 "), prepare_decree(%" PRId64 ")",
-                 pid.get_app_id(),
-                 pid.get_partition_index(),
-                 iter->node.to_string(),
-                 iter->ballot,
-                 iter->last_committed_decree,
-                 iter->last_prepared_decree);
+        LOG_INFO_F("construct for ({}), select {} into last_drops, ballot({}), "
+                   "committed_decree({}), prepare_decree({})",
+                   pid,
+                   iter->node,
+                   iter->ballot,
+                   iter->last_committed_decree,
+                   iter->last_prepared_decree);
     }
 
     cc.prefered_dropped = (int)drop_list.size() - 1;
@@ -244,10 +242,9 @@ void proposal_actions::track_current_learner(const dsn::rpc_address &node, const
             // if we've collected inforamtions for the learner, then it claims it's down
             // we will treat the learning process failed
             if (current_learner.ballot != invalid_ballot) {
-                LOG_INFO("%d.%d: a learner's is down to status(%s), perhaps learn failed",
-                         info.pid.get_app_id(),
-                         info.pid.get_partition_index(),
-                         dsn::enum_to_string(info.status));
+                LOG_INFO_F("{}: a learner's is down to status({}), perhaps learn failed",
+                           info.pid,
+                           dsn::enum_to_string(info.status));
                 learning_progress_abnormal_detected = true;
             } else {
                 LOG_DEBUG_F(
diff --git a/src/meta/meta_server_failure_detector.cpp b/src/meta/meta_server_failure_detector.cpp
index 73f0a79b7..49e49b40c 100644
--- a/src/meta/meta_server_failure_detector.cpp
+++ b/src/meta/meta_server_failure_detector.cpp
@@ -136,10 +136,10 @@ void meta_server_failure_detector::acquire_leader_lock()
             // lock granted
             LPC_META_SERVER_LEADER_LOCK_CALLBACK,
             [this, &err](error_code ec, const std::string &owner, uint64_t version) {
-                LOG_INFO("leader lock granted callback: err(%s), owner(%s), version(%llu)",
-                         ec.to_string(),
-                         owner.c_str(),
-                         version);
+                LOG_INFO_F("leader lock granted callback: err({}), owner({}), version({})",
+                           ec,
+                           owner,
+                           version);
                 err = ec;
                 if (err == dsn::ERR_OK) {
                     leader_initialize(owner);
@@ -178,11 +178,11 @@ void meta_server_failure_detector::reset_stability_stat(const rpc_address &node)
     if (iter == _stablity.end())
         return;
     else {
-        LOG_INFO("old stability stat: node(%s), start_time(%lld), unstable_count(%d), will reset "
-                 "unstable count to 0",
-                 node.to_string(),
-                 iter->second.last_start_time_ms,
-                 iter->second.unstable_restart_count);
+        LOG_INFO_F("old stability stat: node({}), start_time({}), unstable_count({}), will reset "
+                   "unstable count to 0",
+                   node,
+                   iter->second.last_start_time_ms,
+                   iter->second.unstable_restart_count);
         iter->second.unstable_restart_count = 0;
     }
 }
@@ -213,16 +213,16 @@ bool meta_server_failure_detector::update_stability_stat(const fd::beacon_msg &b
             if (dsn_now_ms() - w.last_start_time_ms >=
                     _fd_opts->stable_rs_min_running_seconds * 1000 &&
                 w.unstable_restart_count > 0) {
-                LOG_INFO("%s has stably run for a while, reset it's unstable count(%d) to 0",
-                         beacon.from_addr.to_string(),
-                         w.unstable_restart_count);
+                LOG_INFO_F("{} has stably run for a while, reset it's unstable count({}) to 0",
+                           beacon.from_addr,
+                           w.unstable_restart_count);
                 w.unstable_restart_count = 0;
             }
         } else if (beacon.start_time > w.last_start_time_ms) {
-            LOG_INFO("check %s restarted, last_time(%lld), this_time(%lld)",
-                     beacon.from_addr.to_string(),
-                     w.last_start_time_ms,
-                     beacon.start_time);
+            LOG_INFO_F("check {} restarted, last_time({}), this_time({})",
+                       beacon.from_addr,
+                       w.last_start_time_ms,
+                       beacon.start_time);
             if (beacon.start_time - w.last_start_time_ms <
                 _fd_opts->stable_rs_min_running_seconds * 1000) {
                 w.unstable_restart_count++;
@@ -230,11 +230,11 @@ bool meta_server_failure_detector::update_stability_stat(const fd::beacon_msg &b
                             beacon.from_addr.to_string(),
                             w.unstable_restart_count);
             } else if (w.unstable_restart_count > 0) {
-                LOG_INFO("%s restart in %lld ms after last restart, may recover ok, reset "
-                         "it's unstable count(%d) to 0",
-                         beacon.from_addr.to_string(),
-                         beacon.start_time - w.last_start_time_ms,
-                         w.unstable_restart_count);
+                LOG_INFO_F("{} restart in {} ms after last restart, may recover ok, reset "
+                           "it's unstable count({}) to 0",
+                           beacon.from_addr,
+                           beacon.start_time - w.last_start_time_ms,
+                           w.unstable_restart_count);
                 w.unstable_restart_count = 0;
             }
 
@@ -270,13 +270,13 @@ void meta_server_failure_detector::on_ping(const fd::beacon_msg &beacon,
         failure_detector::on_ping_internal(beacon, ack);
     }
 
-    LOG_INFO("on_ping, beacon send time[%ld], is_master(%s), from_node(%s), this_node(%s), "
-             "primary_node(%s)",
-             ack.time,
-             ack.is_master ? "true" : "false",
-             beacon.from_addr.to_string(),
-             ack.this_node.to_string(),
-             ack.primary_node.to_string());
+    LOG_INFO_F("on_ping, beacon send time[{}], is_master({}), from_node({}), this_node({}), "
+               "primary_node({})",
+               ack.time,
+               ack.is_master ? "true" : "false",
+               beacon.from_addr,
+               ack.this_node,
+               ack.primary_node);
 
     reply(ack);
 }
@@ -285,7 +285,7 @@ void meta_server_failure_detector::on_ping(const fd::beacon_msg &beacon,
 meta_server_failure_detector::meta_server_failure_detector(rpc_address leader_address,
                                                            bool is_myself_leader)
 {
-    LOG_INFO("set %s as leader", leader_address.to_string());
+    LOG_INFO_F("set {} as leader", leader_address);
     _lock_svc = nullptr;
     _is_leader.store(is_myself_leader);
 }
@@ -293,7 +293,7 @@ meta_server_failure_detector::meta_server_failure_detector(rpc_address leader_ad
 void meta_server_failure_detector::set_leader_for_test(rpc_address leader_address,
                                                        bool is_myself_leader)
 {
-    LOG_INFO("set %s as leader", leader_address.to_string());
+    LOG_INFO_F("set {} as leader", leader_address);
     _is_leader.store(is_myself_leader);
 }
 
diff --git a/src/meta/meta_service.cpp b/src/meta/meta_service.cpp
index e5a6c5923..6c74375e5 100644
--- a/src/meta/meta_service.cpp
+++ b/src/meta/meta_service.cpp
@@ -68,10 +68,10 @@ meta_service::meta_service()
     _state.reset(new server_state());
     _function_level.store(_meta_opts.meta_function_level_on_start);
     if (_meta_opts.recover_from_replica_server) {
-        LOG_INFO("enter recovery mode for [meta_server].recover_from_replica_server = true");
+        LOG_INFO_F("enter recovery mode for [meta_server].recover_from_replica_server = true");
         _recovering = true;
         if (_meta_opts.meta_function_level_on_start > meta_function_level::fl_steady) {
-            LOG_INFO("meta server function level changed to fl_steady under recovery mode");
+            LOG_INFO_F("meta server function level changed to fl_steady under recovery mode");
             _function_level.store(meta_function_level::fl_steady);
         }
     }
@@ -146,7 +146,7 @@ error_code meta_service::remote_storage_initialize()
     }
     _cluster_root = current.empty() ? "/" : current;
 
-    LOG_INFO("init meta_state_service succeed, cluster_root = %s", _cluster_root.c_str());
+    LOG_INFO_F("init meta_state_service succeed, cluster_root = {}", _cluster_root);
     return ERR_OK;
 }
 
@@ -278,14 +278,14 @@ void meta_service::start_service()
                            std::chrono::milliseconds(_opts.lb_interval_ms));
 
     if (!_meta_opts.cold_backup_disabled) {
-        LOG_INFO("start backup service");
+        LOG_INFO_F("start backup service");
         tasking::enqueue(LPC_DEFAULT_CALLBACK,
                          nullptr,
                          std::bind(&backup_service::start, _backup_handler.get()));
     }
 
     if (_bulk_load_svc) {
-        LOG_INFO("start bulk load service");
+        LOG_INFO_F("start bulk load service");
         tasking::enqueue(LPC_META_CALLBACK, tracker(), [this]() {
             _bulk_load_svc->initialize_bulk_load_service();
         });
@@ -302,7 +302,7 @@ error_code meta_service::start()
 
     err = remote_storage_initialize();
     dreturn_not_ok_logged(err, "init remote storage failed, err = %s", 
err.to_string());
-    LOG_INFO("remote storage is successfully initialized");
+    LOG_INFO_F("remote storage is successfully initialized");
 
     // start failure detector, and try to acquire the leader lock
     _failure_detector.reset(new meta_server_failure_detector(this));
@@ -317,8 +317,8 @@ error_code meta_service::start()
                                    _meta_opts.enable_white_list);
 
     dreturn_not_ok_logged(err, "start failure_detector failed, err = %s", 
err.to_string());
-    LOG_INFO("meta service failure detector is successfully started %s",
-             _meta_opts.enable_white_list ? "with whitelist enabled" : "");
+    LOG_INFO_F("meta service failure detector is successfully started {}",
+               _meta_opts.enable_white_list ? "with whitelist enabled" : "");
 
     // should register rpc handlers before acquiring leader lock, so that this meta service
     // can tell others who is the current leader
@@ -330,8 +330,8 @@ error_code meta_service::start()
 
     _failure_detector->acquire_leader_lock();
     CHECK(_failure_detector->get_leader(nullptr), "must be primary at this point");
-    LOG_INFO("%s got the primary lock, start to recover server state from remote storage",
-             dsn_primary_address().to_string());
+    LOG_INFO_F("{} got the primary lock, start to recover server state from remote storage",
+               dsn_primary_address());
 
     // initialize the load balancer
     server_load_balancer *balancer = utils::factory_store<server_load_balancer>::create(
@@ -348,7 +348,7 @@ error_code meta_service::start()
     // initializing the backup_handler should after remote_storage be initialized,
     // because we should use _cluster_root
     if (!_meta_opts.cold_backup_disabled) {
-        LOG_INFO("initialize backup handler");
+        LOG_INFO_F("initialize backup handler");
         _backup_handler = std::make_shared<backup_service>(
             this,
             meta_options::concat_path_unix_style(_cluster_root, "backup"),
@@ -363,9 +363,9 @@ error_code meta_service::start()
     _state->initialize(this, meta_options::concat_path_unix_style(_cluster_root, "apps"));
     while ((err = _state->initialize_data_structure()) != ERR_OK) {
         if (err == ERR_OBJECT_NOT_FOUND && _meta_opts.recover_from_replica_server) {
-            LOG_INFO("can't find apps from remote storage, and "
-                     "[meta_server].recover_from_replica_server = true, "
-                     "administrator should recover this cluster manually later");
+            LOG_INFO_F("can't find apps from remote storage, and "
+                       "[meta_server].recover_from_replica_server = true, "
+                       "administrator should recover this cluster manually later");
             return dsn::ERR_OK;
         }
         LOG_ERROR("initialize server state from remote storage failed, err = 
%s, retry ...",
@@ -383,7 +383,7 @@ error_code meta_service::start()
 
     start_service();
 
-    LOG_INFO("start meta_service succeed");
+    LOG_INFO_F("start meta_service succeed");
 
     return ERR_OK;
 }
@@ -701,9 +701,9 @@ void meta_service::on_update_configuration(dsn::message_ex *req)
         _state->query_configuration_by_gpid(request->config.pid, response.config);
         reply(req, response);
 
-        LOG_INFO("refuse request %s coz meta function level is %s",
-                 boost::lexical_cast<std::string>(*request).c_str(),
-                 _meta_function_level_VALUES_TO_NAMES.find(level)->second);
+        LOG_INFO_F("refuse request {} coz meta function level is {}",
+                   boost::lexical_cast<std::string>(*request),
+                   _meta_function_level_VALUES_TO_NAMES.find(level)->second);
         return;
     }
 
@@ -745,16 +745,14 @@ void meta_service::on_propose_balancer(configuration_balancer_rpc rpc)
     }
 
     const configuration_balancer_request &request = rpc.request();
-    LOG_INFO("get proposal balancer request, gpid(%d.%d)",
-             request.gpid.get_app_id(),
-             request.gpid.get_partition_index());
+    LOG_INFO_F("get proposal balancer request, gpid({})", request.gpid);
     _state->on_propose_balancer(request, rpc.response());
 }
 
 void meta_service::on_start_recovery(configuration_recovery_rpc rpc)
 {
     configuration_recovery_response &response = rpc.response();
-    LOG_INFO("got start recovery request, start to do recovery");
+    LOG_INFO_F("got start recovery request, start to do recovery");
     int result = check_leader(rpc, nullptr);
     // request has been forwarded to others
     if (result == 0) {
@@ -766,8 +764,8 @@ void meta_service::on_start_recovery(configuration_recovery_rpc rpc)
     } else {
         zauto_write_lock l(_meta_lock);
         if (_started.load()) {
-            LOG_INFO("service(%s) is already started, ignore the recovery request",
-                     dsn_primary_address().to_string());
+            LOG_INFO_F("service({}) is already started, ignore the recovery request",
+                       dsn_primary_address());
             response.err = ERR_SERVICE_ALREADY_RUNNING;
         } else {
             _state->on_start_recovery(rpc.request(), response);
diff --git a/src/meta/meta_service.h b/src/meta/meta_service.h
index e89aacaaa..96a74d38e 100644
--- a/src/meta/meta_service.h
+++ b/src/meta/meta_service.h
@@ -370,7 +370,7 @@ bool meta_service::check_status(TRpcHolder rpc, rpc_address *forward_address)
 {
     if (!_access_controller->allowed(rpc.dsn_request())) {
         rpc.response().err = ERR_ACL_DENY;
-        LOG_INFO("reject request with ERR_ACL_DENY");
+        LOG_INFO_F("reject request with ERR_ACL_DENY");
         return false;
     }
 
@@ -385,7 +385,7 @@ bool meta_service::check_status(TRpcHolder rpc, rpc_address *forward_address)
         } else {
             rpc.response().err = ERR_SERVICE_NOT_ACTIVE;
         }
-        LOG_INFO("reject request with %s", rpc.response().err.to_string());
+        LOG_INFO_F("reject request with {}", rpc.response().err);
         return false;
     }
 
@@ -396,7 +396,7 @@ template <typename TRespType>
 bool meta_service::check_status_with_msg(message_ex *req, TRespType &response_struct)
 {
     if (!_access_controller->allowed(req)) {
-        LOG_INFO("reject request with ERR_ACL_DENY");
+        LOG_INFO_F("reject request with ERR_ACL_DENY");
         response_struct.err = ERR_ACL_DENY;
         reply(req, response_struct);
         return false;
@@ -414,7 +414,7 @@ bool meta_service::check_status_with_msg(message_ex *req, TRespType &response_st
         } else {
             response_struct.err = ERR_SERVICE_NOT_ACTIVE;
         }
-        LOG_INFO("reject request with %s", response_struct.err.to_string());
+        LOG_INFO_F("reject request with {}", response_struct.err);
         reply(req, response_struct);
         return false;
     }
diff --git a/src/meta/meta_state_service_zookeeper.cpp b/src/meta/meta_state_service_zookeeper.cpp
index 14627ab3f..ad2ecb2c8 100644
--- a/src/meta/meta_state_service_zookeeper.cpp
+++ b/src/meta/meta_state_service_zookeeper.cpp
@@ -168,7 +168,7 @@ error_code meta_state_service_zookeeper::initialize(const std::vector<std::strin
             return ERR_TIMEOUT;
     }
 
-    LOG_INFO("init meta_state_service_zookeeper succeed");
+    LOG_INFO_F("init meta_state_service_zookeeper succeed");
 
     // Notice: this reference is released in finalize
     add_ref();
diff --git a/src/meta/partition_guardian.cpp b/src/meta/partition_guardian.cpp
index fdf9c9f29..f586048bc 100644
--- a/src/meta/partition_guardian.cpp
+++ b/src/meta/partition_guardian.cpp
@@ -187,11 +187,10 @@ bool partition_guardian::from_proposals(meta_view &view,
 invalid_action:
     std::stringstream ss;
     ss << action;
-    LOG_INFO("proposal action(%s) for gpid(%d.%d) is invalid, clear all proposal actions: %s",
-             ss.str().c_str(),
-             gpid.get_app_id(),
-             gpid.get_partition_index(),
-             reason);
+    LOG_INFO_F("proposal action({}) for gpid({}) is invalid, clear all proposal actions: {}",
+               ss.str(),
+               gpid,
+               reason);
     action.type = config_type::CT_INVALID;
 
     while (!cc.lb_actions.empty()) {
@@ -293,17 +292,17 @@ pc_status partition_guardian::on_missing_primary(meta_view &view, const dsn::gpi
             const dropped_replica &dr = cc.dropped[i];
             char time_buf[30];
             ::dsn::utils::time_ms_to_string(dr.time, time_buf);
-            LOG_INFO("%s: config_context.dropped[%d]: "
-                     "node(%s), time(%" PRIu64 "){%s}, ballot(%" PRId64 "), "
-                     "commit_decree(%" PRId64 "), prepare_decree(%" PRId64 ")",
-                     gpid_name,
-                     i,
-                     dr.node.to_string(),
-                     dr.time,
-                     time_buf,
-                     dr.ballot,
-                     dr.last_committed_decree,
-                     dr.last_prepared_decree);
+            LOG_INFO_F("{}: config_context.dropped[{}]: "
+                       "node({}), time({})[{}], ballot({}), "
+                       "commit_decree({}), prepare_decree({})",
+                       gpid_name,
+                       i,
+                       dr.node,
+                       dr.time,
+                       time_buf,
+                       dr.ballot,
+                       dr.last_committed_decree,
+                       dr.last_prepared_decree);
         }
 
         for (int i = 0; i < pc.last_drops.size(); ++i) {
@@ -314,11 +313,11 @@ pc_status partition_guardian::on_missing_primary(meta_view &view, const dsn::gpi
                     break;
                 }
             }
-            LOG_INFO("%s: config_context.last_drops[%d]: node(%s), dropped_index(%d)",
-                     gpid_name,
-                     i,
-                     pc.last_drops[i].to_string(),
-                     dropped_index);
+            LOG_INFO_F("{}: config_context.last_drops[{}]: node({}), dropped_index({})",
+                       gpid_name,
+                       i,
+                       pc.last_drops[i],
+                       dropped_index);
         }
 
         if (pc.last_drops.size() == 1) {
@@ -331,10 +330,10 @@ pc_status partition_guardian::on_missing_primary(meta_view &view, const dsn::gpi
             std::vector<dropped_replica> collected_info(2);
             bool ready = true;
 
-            LOG_INFO("%s: last two drops are %s and %s (the latest dropped)",
-                     gpid_name,
-                     nodes[0].to_string(),
-                     nodes[1].to_string());
+            LOG_INFO_F("{}: last two drops are {} and {} (the latest dropped)",
+                       gpid_name,
+                       nodes[0],
+                       nodes[1]);
 
             for (unsigned int i = 0; i < nodes.size(); ++i) {
                 node_state *ns = get_node_state(*view.nodes, nodes[i], false);
@@ -347,10 +346,10 @@ pc_status partition_guardian::on_missing_primary(meta_view &view, const dsn::gpi
                     std::vector<dropped_replica>::iterator it = cc.find_from_dropped(nodes[i]);
                     if (it == cc.dropped.end() || it->ballot == invalid_ballot) {
                         if (ns->has_collected()) {
-                            LOG_INFO("%s: ignore %s's replica info as it doesn't exist on replica "
-                                     "server",
-                                     gpid_name,
-                                     nodes[i].to_string());
+                            LOG_INFO_F("{}: ignore {}'s replica info as it doesn't exist on "
+                                       "replica server",
+                                       gpid_name,
+                                       nodes[i]);
                             collected_info[i] = {nodes[i], 0, -1, -1, -1};
                         } else {
                             ready = false;
@@ -403,8 +402,7 @@ pc_status partition_guardian::on_missing_primary(meta_view &view, const dsn::gpi
                                               ? previous_dead.node
                                               : recent_dead.node;
                         }
-                        LOG_INFO(
-                            "%s: select %s as a new primary", gpid_name, action.node.to_string());
+                        LOG_INFO_F("{}: select {} as a new primary", gpid_name, action.node);
                     } else {
                         char buf[1000];
                         sprintf(buf,
@@ -485,25 +483,25 @@ pc_status partition_guardian::on_missing_secondary(meta_view &view, const dsn::g
         // when max_replica_count == 2, even if there is only 1 replica alive now, we will still
         // wait for replica_assign_delay_ms_for_dropouts before recover the second replica.
         is_emergency = true;
-        LOG_INFO("gpid(%s): is emergency due to too few replicas", gpid.to_string());
+        LOG_INFO_F("gpid({}): is emergency due to too few replicas", gpid);
     } else if (cc.dropped.empty()) {
         is_emergency = true;
-        LOG_INFO("gpid(%s): is emergency due to no dropped candidate", gpid.to_string());
+        LOG_INFO_F("gpid({}): is emergency due to no dropped candidate", gpid);
     } else if (has_milliseconds_expired(cc.dropped.back().time +
                                         _replica_assign_delay_ms_for_dropouts)) {
         is_emergency = true;
         char time_buf[30];
         ::dsn::utils::time_ms_to_string(cc.dropped.back().time, time_buf);
-        LOG_INFO("gpid(%s): is emergency due to lose secondary for a long time, "
-                 "last_dropped_node(%s), drop_time(%s), delay_ms(%" PRIu64 ")",
-                 gpid.to_string(),
-                 cc.dropped.back().node.to_string(),
-                 time_buf,
-                 _replica_assign_delay_ms_for_dropouts);
+        LOG_INFO_F("gpid({}): is emergency due to lose secondary for a long time, "
+                   "last_dropped_node({}), drop_time({}), delay_ms({})",
+                   gpid,
+                   cc.dropped.back().node,
+                   time_buf,
+                   _replica_assign_delay_ms_for_dropouts);
     } else if (in_black_list(cc.dropped.back().node)) {
-        LOG_INFO("gpid(%s) is emergency due to recent dropped(%s) is in black list",
-                 gpid.to_string(),
-                 cc.dropped.back().node.to_string());
+        LOG_INFO_F("gpid({}) is emergency due to recent dropped({}) is in black list",
+                   gpid,
+                   cc.dropped.back().node);
         is_emergency = true;
     }
     action.node.set_invalid();
@@ -515,51 +513,51 @@ pc_status partition_guardian::on_missing_secondary(meta_view &view, const dsn::g
                 oss << ",";
             oss << cc.dropped[i].node.to_string();
         }
-        LOG_INFO("gpid(%s): try to choose node in dropped list, dropped_list(%s), "
-                 "prefered_dropped(%d)",
-                 gpid.to_string(),
-                 oss.str().c_str(),
-                 cc.prefered_dropped);
+        LOG_INFO_F(
+            "gpid({}): try to choose node in dropped list, dropped_list({}), prefered_dropped({})",
+            gpid,
+            oss.str(),
+            cc.prefered_dropped);
         if (cc.prefered_dropped < 0 || cc.prefered_dropped >= (int)cc.dropped.size()) {
-            LOG_INFO("gpid(%s): prefered_dropped(%d) is invalid according to drop_list(size %d), "
-                     "reset it to %d (drop_list.size - 1)",
-                     gpid.to_string(),
-                     cc.prefered_dropped,
-                     (int)cc.dropped.size(),
-                     (int)cc.dropped.size() - 1);
+            LOG_INFO_F("gpid({}): prefered_dropped({}) is invalid according to drop_list(size {}), "
+                       "reset it to {} (drop_list.size - 1)",
+                       gpid,
+                       cc.prefered_dropped,
+                       cc.dropped.size(),
+                       cc.dropped.size() - 1);
             cc.prefered_dropped = (int)cc.dropped.size() - 1;
         }
 
         while (cc.prefered_dropped >= 0) {
             const dropped_replica &server = cc.dropped[cc.prefered_dropped];
             if (is_node_alive(*view.nodes, server.node)) {
-                LOG_INFO("gpid(%s): node(%s) at cc.dropped[%d] is alive now, choose it, "
-                         "and forward prefered_dropped from (%d) to (%d)",
-                         gpid.to_string(),
-                         server.node.to_string(),
-                         cc.prefered_dropped,
-                         cc.prefered_dropped,
-                         cc.prefered_dropped - 1);
+                LOG_INFO_F("gpid({}): node({}) at cc.dropped[{}] is alive now, choose it, "
+                           "and forward prefered_dropped from {} to {}",
+                           gpid,
+                           server.node,
+                           cc.prefered_dropped,
+                           cc.prefered_dropped,
+                           cc.prefered_dropped - 1);
                 action.node = server.node;
                 cc.prefered_dropped--;
                 break;
             } else {
-                LOG_INFO("gpid(%s): node(%s) at cc.dropped[%d] is not alive now, "
-                         "changed prefered_dropped from (%d) to (%d)",
-                         gpid.to_string(),
-                         server.node.to_string(),
-                         cc.prefered_dropped,
-                         cc.prefered_dropped,
-                         cc.prefered_dropped - 1);
+                LOG_INFO_F("gpid({}): node({}) at cc.dropped[{}] is not alive now, "
+                           "changed prefered_dropped from {} to {}",
+                           gpid,
+                           server.node,
+                           cc.prefered_dropped,
+                           cc.prefered_dropped,
+                           cc.prefered_dropped - 1);
                 cc.prefered_dropped--;
             }
         }
 
         if (action.node.is_invalid() || in_black_list(action.node)) {
             if (!action.node.is_invalid()) {
-                LOG_INFO("gpid(%s) refuse to use selected node(%s) as it is in black list",
-                         gpid.to_string(),
-                         action.node.to_string());
+                LOG_INFO_F("gpid({}) refuse to use selected node({}) as it is in black list",
+                           gpid,
+                           action.node);
             }
             newly_partitions *min_server_np = nullptr;
             for (auto &pairs : *view.nodes) {
@@ -575,14 +573,14 @@ pc_status partition_guardian::on_missing_secondary(meta_view &view, const dsn::g
             }
 
             if (!action.node.is_invalid()) {
-                LOG_INFO("gpid(%s): can't find valid node in dropped list to 
add as secondary, "
-                         "choose new node(%s) with minimal partitions serving",
-                         gpid.to_string(),
-                         action.node.to_string());
+                LOG_INFO_F("gpid({}): can't find valid node in dropped list to 
add as secondary, "
+                           "choose new node({}) with minimal partitions 
serving",
+                           gpid,
+                           action.node);
             } else {
-                LOG_INFO("gpid(%s): can't find valid node in dropped list to 
add as secondary, "
-                         "but also we can't find a new node to add as 
secondary",
-                         gpid.to_string());
+                LOG_INFO_F("gpid({}): can't find valid node in dropped list to 
add as secondary, "
+                           "but also we can't find a new node to add as 
secondary",
+                           gpid);
             }
         }
     } else {
@@ -596,15 +594,15 @@ pc_status 
partition_guardian::on_missing_secondary(meta_view &view, const dsn::g
         }
 
         if (!action.node.is_invalid()) {
-            LOG_INFO("gpid(%s): choose node(%s) as secondary coz it is 
last_dropped_node and is "
-                     "alive now",
-                     gpid.to_string(),
-                     server.node.to_string());
+            LOG_INFO_F("gpid({}): choose node({}) as secondary coz it is 
last_dropped_node and is "
+                       "alive now",
+                       gpid,
+                       server.node);
         } else {
-            LOG_INFO("gpid(%s): can't add secondary coz last_dropped_node(%s) 
is not alive now, "
-                     "ignore this as not in emergency",
-                     gpid.to_string(),
-                     server.node.to_string());
+            LOG_INFO_F("gpid({}): can't add secondary coz 
last_dropped_node({}) is not alive now, "
+                       "ignore this as not in emergency",
+                       gpid,
+                       server.node);
         }
     }
 
@@ -652,9 +650,9 @@ void partition_guardian::finish_cure_proposal(meta_view 
&view,
 {
     newly_partitions *np = get_newly_partitions(*(view.nodes), act.node);
     if (np == nullptr) {
-        LOG_INFO("can't get the newly_partitions extension structure for 
node(%s), "
-                 "the node may be dead and removed",
-                 act.node.to_string());
+        LOG_INFO_F("can't get the newly_partitions extension structure for 
node({}), "
+                   "the node may be dead and removed",
+                   act.node);
     } else {
         if (act.type == config_type::CT_ASSIGN_PRIMARY) {
             np->newly_remove_primary(gpid.get_app_id(), false);
diff --git a/src/meta/server_state.cpp b/src/meta/server_state.cpp
index a0fc589aa..120b742ce 100644
--- a/src/meta/server_state.cpp
+++ b/src/meta/server_state.cpp
@@ -215,7 +215,7 @@ bool server_state::spin_wait_staging(int timeout_seconds)
         if (c == 0) {
             return true;
         }
-        LOG_INFO("there are (%d) apps still in staging, just wait...", c);
+        LOG_INFO_F("there are {} apps still in staging, just wait...", c);
         std::this_thread::sleep_for(std::chrono::seconds(1));
         if (timeout_seconds > 0) {
             --timeout_seconds;
@@ -272,10 +272,10 @@ void 
server_state::transition_staging_state(std::shared_ptr<app_state> &app)
               enum_to_string(app->status));
     }
 
-    LOG_INFO("app(%s) transfer from %s to %s",
-             app->get_logname(),
-             enum_to_string(old_status),
-             enum_to_string(app->status));
+    LOG_INFO_F("app({}) transfer from {} to {}",
+               app->get_logname(),
+               enum_to_string(old_status),
+               enum_to_string(app->status));
 #undef send_response
 }
 
@@ -328,7 +328,7 @@ error_code server_state::dump_from_remote_storage(const 
char *local_path, bool s
     if (sync_immediately) {
         ec = sync_apps_from_remote_storage();
         if (ec == ERR_OBJECT_NOT_FOUND) {
-            LOG_INFO("remote storage is empty, just stop the dump");
+            LOG_INFO_F("remote storage is empty, just stop the dump");
             return ERR_OK;
         } else if (ec != ERR_OK) {
             LOG_ERROR("sync from remote storage failed, err(%s)", 
ec.to_string());
@@ -350,7 +350,7 @@ error_code server_state::dump_from_remote_storage(const 
char *local_path, bool s
         {
             zauto_read_lock l(_lock);
             if (count_staging_app() != 0) {
-                LOG_INFO("there are apps in staging, skip this dump");
+                LOG_INFO_F("there are apps in staging, skip this dump");
                 return ERR_INVALID_STATE;
             }
             snapshots.reserve(_all_apps.size());
@@ -429,7 +429,7 @@ error_code server_state::initialize_default_apps()
 {
     std::vector<const char *> sections;
     dsn_config_get_all_sections(sections);
-    LOG_INFO("start to do initialize");
+    LOG_INFO_F("start to do initialize");
 
     app_info default_app;
     for (int i = 0; i < sections.size(); i++) {
@@ -502,7 +502,7 @@ error_code server_state::sync_apps_to_remote_storage()
         LOG_ERROR("create root node /apps in meta store failed, err = %s", 
err.to_string());
         return err;
     } else {
-        LOG_INFO("set %s to lock state in remote storage", _apps_root.c_str());
+        LOG_INFO_F("set {} to lock state in remote storage", _apps_root);
     }
 
     err = ERR_OK;
@@ -524,7 +524,7 @@ error_code server_state::sync_apps_to_remote_storage()
                                                  ec.to_string());
                                      err = ec;
                                  } else {
-                                     LOG_INFO("create app node %s ok", 
path.c_str());
+                                     LOG_INFO_F("create app node {} ok", path);
                                  }
                              },
                              value,
@@ -551,7 +551,7 @@ error_code server_state::sync_apps_to_remote_storage()
                                                   [&err](dsn::error_code e) { 
err = e; });
     t->wait();
     if (dsn::ERR_OK == err) {
-        LOG_INFO("set %s to unlock state in remote storage", 
_apps_root.c_str());
+        LOG_INFO_F("set {} to unlock state in remote storage", _apps_root);
         return err;
     } else {
         LOG_ERROR("set %s to unlock state in remote storage failed, 
reason(%s)",
@@ -756,7 +756,7 @@ error_code server_state::initialize_data_structure()
         if (_meta_svc->get_meta_options().recover_from_replica_server) {
             return ERR_OBJECT_NOT_FOUND;
         } else {
-            LOG_INFO("can't find apps from remote storage, start to initialize 
default apps");
+            LOG_INFO_F("can't find apps from remote storage, start to 
initialize default apps");
             err = initialize_default_apps();
         }
     } else if (err == ERR_OK) {
@@ -765,8 +765,8 @@ error_code server_state::initialize_data_structure()
                   "find apps from remote storage, but "
                   "[meta_server].recover_from_replica_server = true");
         } else {
-            LOG_INFO(
-                "sync apps from remote storage ok, get %d apps, init the node 
state accordingly",
+            LOG_INFO_F(
+                "sync apps from remote storage ok, get {} apps, init the node 
state accordingly",
                 _all_apps.size());
             initialize_node_state();
         }
@@ -794,9 +794,9 @@ void 
server_state::on_config_sync(configuration_query_by_node_rpc rpc)
 
     bool reject_this_request = false;
     response.__isset.gc_replicas = false;
-    LOG_INFO("got config sync request from %s, stored_replicas_count(%d)",
-             request.node.to_string(),
-             (int)request.stored_replicas.size());
+    LOG_INFO_F("got config sync request from {}, stored_replicas_count({})",
+               request.node,
+               request.stored_replicas.size());
 
     {
         zauto_read_lock l(_lock);
@@ -804,7 +804,7 @@ void 
server_state::on_config_sync(configuration_query_by_node_rpc rpc)
         // sync the partitions to the replica server
         node_state *ns = get_node_state(_nodes, request.node, false);
         if (ns == nullptr) {
-            LOG_INFO("node(%s) not found in meta server", 
request.node.to_string());
+            LOG_INFO_F("node({}) not found in meta server", request.node);
             response.err = ERR_OBJECT_NOT_FOUND;
         } else {
             response.err = ERR_OK;
@@ -882,23 +882,20 @@ void 
server_state::on_config_sync(configuration_query_by_node_rpc rpc)
                     }
                 } else if (app->status == app_status::AS_DROPPED) {
                     if (app->expire_second == 0) {
-                        LOG_INFO(
-                            "gpid(%d.%d) on node(%s) is of dropped table, but 
expire second is "
-                            "not specified, do not delete it for safety 
reason",
-                            rep.pid.get_app_id(),
-                            rep.pid.get_partition_index(),
-                            request.node.to_string());
+                        LOG_INFO_F("gpid({}) on node({}) is of dropped table, 
but expire second is "
+                                   "not specified, do not delete it for safety 
reason",
+                                   rep.pid,
+                                   request.node);
                     } else if (has_seconds_expired(app->expire_second)) {
                         // can delete replica only when expire second is 
explicitely specified and
                         // expired.
                         if (level <= meta_function_level::fl_steady) {
-                            LOG_INFO("gpid(%d.%d) on node(%s) is of dropped 
and expired table, but "
-                                     "current function level is %s, do not 
delete it for safety "
-                                     "reason",
-                                     rep.pid.get_app_id(),
-                                     rep.pid.get_partition_index(),
-                                     request.node.to_string(),
-                                     
_meta_function_level_VALUES_TO_NAMES.find(level)->second);
+                            LOG_INFO_F("gpid({}) on node({}) is of dropped and 
expired table, but "
+                                       "current function level is {}, do not 
delete it for safety "
+                                       "reason",
+                                       rep.pid,
+                                       request.node,
+                                       
_meta_function_level_VALUES_TO_NAMES.find(level)->second);
                         } else {
                             response.gc_replicas.push_back(rep);
                             LOG_WARNING(
@@ -914,13 +911,11 @@ void 
server_state::on_config_sync(configuration_query_by_node_rpc rpc)
                         collect_replica({&_all_apps, &_nodes}, request.node, 
rep);
                     if (!is_useful_replica) {
                         if (level <= meta_function_level::fl_steady) {
-                            LOG_INFO(
-                                "gpid(%d.%d) on node(%s) is useless, but 
current function level "
-                                "is %s, do not delete it for safety reason",
-                                rep.pid.get_app_id(),
-                                rep.pid.get_partition_index(),
-                                request.node.to_string(),
-                                
_meta_function_level_VALUES_TO_NAMES.find(level)->second);
+                            LOG_INFO_F("gpid({}) on node({}) is useless, but 
current function "
+                                       "level is {}, do not delete it for 
safety reason",
+                                       rep.pid,
+                                       request.node,
+                                       
_meta_function_level_VALUES_TO_NAMES.find(level)->second);
                         } else {
                             response.gc_replicas.push_back(rep);
                             LOG_WARNING("notify node(%s) to gc replica(%d.%d) 
coz it is useless",
@@ -1204,7 +1199,7 @@ void server_state::drop_app(dsn::message_ex *msg)
     bool do_dropping = false;
     std::shared_ptr<app_state> app;
     dsn::unmarshall(msg, request);
-    LOG_INFO("drop app request, name(%s)", request.app_name.c_str());
+    LOG_INFO_F("drop app request, name({})", request.app_name);
     {
         zauto_write_lock l(_lock);
         app = get_app(request.app_name);
@@ -1355,7 +1350,7 @@ void server_state::recall_app(dsn::message_ex *msg)
     std::shared_ptr<app_state> target_app;
 
     dsn::unmarshall(msg, request);
-    LOG_INFO("recall app request, app_id(%d)", request.app_id);
+    LOG_INFO_F("recall app request, app_id({})", request.app_id);
 
     bool do_recalling = false;
     {
@@ -1417,13 +1412,12 @@ void server_state::list_apps(const 
configuration_list_apps_request &request,
 
 void server_state::send_proposal(rpc_address target, const 
configuration_update_request &proposal)
 {
-    LOG_INFO("send proposal %s for gpid(%d.%d), ballot = %" PRId64 ", target = 
%s, node = %s",
-             ::dsn::enum_to_string(proposal.type),
-             proposal.config.pid.get_app_id(),
-             proposal.config.pid.get_partition_index(),
-             proposal.config.ballot,
-             target.to_string(),
-             proposal.node.to_string());
+    LOG_INFO_F("send proposal {} for gpid({}), ballot = {}, target = {}, node 
= {}",
+               ::dsn::enum_to_string(proposal.type),
+               proposal.config.pid,
+               proposal.config.ballot,
+               target,
+               proposal.node);
     dsn::message_ex *msg =
         dsn::message_ex::create_request(RPC_CONFIG_PROPOSAL, 0, 
proposal.config.pid.thread_hash());
     ::marshall(msg, proposal);
@@ -1592,15 +1586,15 @@ void server_state::update_configuration_locally(
     old_cfg = config_request->config;
     auto find_name = _config_type_VALUES_TO_NAMES.find(config_request->type);
     if (find_name != _config_type_VALUES_TO_NAMES.end()) {
-        LOG_INFO("meta update config ok: type(%s), old_config=%s, %s",
-                 find_name->second,
-                 old_config_str.c_str(),
-                 boost::lexical_cast<std::string>(*config_request).c_str());
+        LOG_INFO_F("meta update config ok: type({}), old_config={}, {}",
+                   find_name->second,
+                   old_config_str,
+                   boost::lexical_cast<std::string>(*config_request));
     } else {
-        LOG_INFO("meta update config ok: type(%d), old_config=%s, %s",
-                 config_request->type,
-                 old_config_str.c_str(),
-                 boost::lexical_cast<std::string>(*config_request).c_str());
+        LOG_INFO_F("meta update config ok: type({}), old_config={}, {}",
+                   config_request->type,
+                   old_config_str,
+                   boost::lexical_cast<std::string>(*config_request));
     }
 
 #ifndef NDEBUG
@@ -1624,8 +1618,8 @@ task_ptr server_state::update_configuration_on_remote(
 {
     meta_function_level::type l = _meta_svc->get_function_level();
     if (l <= meta_function_level::fl_blind) {
-        LOG_INFO("ignore update configuration on remote due to level is %s",
-                 _meta_function_level_VALUES_TO_NAMES.find(l)->second);
+        LOG_INFO_F("ignore update configuration on remote due to level is {}",
+                   _meta_function_level_VALUES_TO_NAMES.find(l)->second);
         // NOTICE: pending_sync_task need to be reassigned
         return tasking::enqueue(
             LPC_META_STATE_HIGH,
@@ -1849,10 +1843,10 @@ void 
server_state::downgrade_secondary_to_inactive(std::shared_ptr<app_state> &a
         request.node = node;
         send_proposal(pc.primary, request);
     } else {
-        LOG_INFO("gpid(%d.%d) is syncing with remote storage, ignore the 
remove seconary(%s)",
-                 app->app_id,
-                 pidx,
-                 node.to_string());
+        LOG_INFO_F("gpid({}.{}) is syncing with remote storage, ignore the 
remove seconary({})",
+                   app->app_id,
+                   pidx,
+                   node);
     }
 }
 
@@ -1918,13 +1912,13 @@ void server_state::on_update_configuration(
     CHECK(app->is_stateful, "don't support stateless apps currently, id({})", 
gpid.get_app_id());
     auto find_name = _config_type_VALUES_TO_NAMES.find(cfg_request->type);
     if (find_name != _config_type_VALUES_TO_NAMES.end()) {
-        LOG_INFO("recv update config request: type(%s), %s",
-                 find_name->second,
-                 boost::lexical_cast<std::string>(*cfg_request).c_str());
+        LOG_INFO_F("recv update config request: type({}), {}",
+                   find_name->second,
+                   boost::lexical_cast<std::string>(*cfg_request));
     } else {
-        LOG_INFO("recv update config request: type(%d), %s",
-                 cfg_request->type,
-                 boost::lexical_cast<std::string>(*cfg_request).c_str());
+        LOG_INFO_F("recv update config request: type({}), {}",
+                   cfg_request->type,
+                   boost::lexical_cast<std::string>(*cfg_request));
     }
 
     if (is_partition_config_equal(pc, cfg_request->config)) {
diff --git a/src/meta/server_state_restore.cpp 
b/src/meta/server_state_restore.cpp
index e6c9586d4..ffdd4bf08 100644
--- a/src/meta/server_state_restore.cpp
+++ b/src/meta/server_state_restore.cpp
@@ -197,11 +197,10 @@ void 
server_state::on_recv_restore_report(configuration_report_restore_status_rp
         if (request.__isset.reason) {
             r_state.reason = request.reason;
         }
-        LOG_INFO("%d.%d restore report: restore_status(%s), progress(%d)",
-                 request.pid.get_app_id(),
-                 request.pid.get_partition_index(),
-                 request.restore_status.to_string(),
-                 request.progress);
+        LOG_INFO_F("{} restore report: restore_status({}), progress({})",
+                   request.pid,
+                   request.restore_status,
+                   request.progress);
     }
 }
 
diff --git a/src/meta/test/backup_test.cpp b/src/meta/test/backup_test.cpp
index 3c0896284..0a341cf5f 100644
--- a/src/meta/test/backup_test.cpp
+++ b/src/meta/test/backup_test.cpp
@@ -76,7 +76,7 @@ protected:
     MOCK_HELPER_FUNCS(method_name)                                             
                    \
     void method_name()                                                         
                    \
     {                                                                          
                    \
-        LOG_INFO("%s is called", #method_name);                                
                    \
+        LOG_INFO_F("{} is called", #method_name);                              
                    \
         int &c = counter_##method_name();                                      
                    \
         ++c;                                                                   
                    \
         int max_call = maxcall_##method_name();                                
                    \
@@ -92,7 +92,7 @@ protected:
     MOCK_HELPER_FUNCS(method_name)                                             
                    \
     void method_name(type1 arg1)                                               
                    \
     {                                                                          
                    \
-        LOG_INFO("%s is called", #method_name);                                
                    \
+        LOG_INFO_F("{} is called", #method_name);                              
                    \
         int &c = counter_##method_name();                                      
                    \
         ++c;                                                                   
                    \
         int max_call = maxcall_##method_name();                                
                    \
@@ -108,7 +108,7 @@ protected:
     MOCK_HELPER_FUNCS(method_name)                                             
                    \
     void method_name(type1 arg1, type2 arg2)                                   
                    \
     {                                                                          
                    \
-        LOG_INFO("%s is called", #method_name);                                
                    \
+        LOG_INFO_F("{} is called", #method_name);                              
                    \
         int &c = counter_##method_name();                                      
                    \
         ++c;                                                                   
                    \
         int max_call = maxcall_##method_name();                                
                    \
@@ -124,7 +124,7 @@ protected:
     MOCK_HELPER_FUNCS(method_name)                                             
                    \
     void method_name(type1 arg1, type2 arg2, type3, arg3)                      
                    \
     {                                                                          
                    \
-        LOG_INFO("%s is called", #method_name);                                
                    \
+        LOG_INFO_F("{} is called", #method_name);                              
                    \
         int &c = counter_##method_name();                                      
                    \
         ++c;                                                                   
                    \
         int max_call = maxcall_##method_name();                                
                    \
diff --git a/src/meta/test/meta_state/meta_state_service.cpp 
b/src/meta/test/meta_state/meta_state_service.cpp
index d6196a6b2..46bd969d6 100644
--- a/src/meta/test/meta_state/meta_state_service.cpp
+++ b/src/meta/test/meta_state/meta_state_service.cpp
@@ -258,7 +258,7 @@ void provider_recursively_create_delete_test(const 
service_creator_func &creator
         ->delete_node("/r",
                       true,
                       META_STATE_SERVICE_SIMPLE_TEST_CALLBACK,
-                      [](error_code ec) { LOG_INFO("result: %s", 
ec.to_string()); })
+                      [](error_code ec) { LOG_INFO_F("result: {}", ec); })
         ->wait();
     service->create_node(
         "/r",
diff --git a/src/meta/test/update_configuration_test.cpp 
b/src/meta/test/update_configuration_test.cpp
index 10ff2e8b3..1a5d17c96 100644
--- a/src/meta/test/update_configuration_test.cpp
+++ b/src/meta/test/update_configuration_test.cpp
@@ -120,7 +120,7 @@ class null_meta_service : public 
dsn::replication::meta_service
 public:
     void send_message(const dsn::rpc_address &target, dsn::message_ex *request)
     {
-        LOG_INFO("send request to %s", target.to_string());
+        LOG_INFO_F("send request to {}", target);
         request->add_ref();
         request->release_ref();
     }

