This is an automated email from the ASF dual-hosted git repository.

zhaoliwei pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-pegasus.git


The following commit(s) were added to refs/heads/master by this push:
     new 180277a28 refactor(log): use LOG_DEBUG_F instead of LOG_DEBUG (#1306)
180277a28 is described below

commit 180277a28e154579f4204df8628ad67715521bec
Author: Yingchun Lai <[email protected]>
AuthorDate: Wed Jan 11 14:20:40 2023 +0800

    refactor(log): use LOG_DEBUG_F instead of LOG_DEBUG (#1306)
---
 src/aio/disk_engine.cpp                            |  8 +--
 src/block_service/fds/fds_service.cpp              |  2 +-
 src/block_service/local/local_service.cpp          | 23 ++++----
 src/client/partition_resolver.h                    |  2 +
 src/client/partition_resolver_simple.cpp           | 20 +++----
 src/failure_detector/failure_detector.cpp          | 12 ++--
 src/failure_detector/test/failure_detector.cpp     | 20 +++----
 src/meta/load_balance_policy.cpp                   |  7 +--
 src/meta/meta_backup_service.cpp                   |  6 +-
 src/meta/meta_data.cpp                             |  6 +-
 src/meta/meta_server_failure_detector.cpp          |  5 +-
 src/meta/meta_service.cpp                          |  2 +-
 src/meta/meta_service.h                            |  2 +-
 src/meta/meta_state_service_simple.cpp             |  2 +-
 src/meta/meta_state_service_zookeeper.cpp          | 18 +++---
 src/meta/server_state.cpp                          | 29 ++++------
 .../test/balancer_simulator/balancer_simulator.cpp |  4 +-
 src/meta/test/balancer_validator.cpp               | 24 ++++----
 src/meta/test/misc/misc.cpp                        | 19 +++----
 src/nfs/nfs_server_impl.cpp                        | 12 +---
 src/perf_counter/perf_counters.cpp                 |  5 +-
 src/perf_counter/perf_counters.h                   |  2 +-
 src/redis_protocol/proxy_lib/proxy_layer.h         |  2 +
 src/redis_protocol/proxy_lib/redis_parser.cpp      | 38 ++++---------
 src/replica/mutation.cpp                           |  7 +--
 src/replica/mutation_log.cpp                       | 54 ++++++++----------
 src/replica/mutation_log_replay.cpp                |  2 +-
 src/replica/replica.cpp                            | 38 ++++++-------
 src/replica/replica_2pc.cpp                        | 35 +++++-------
 src/replica/replica_config.cpp                     |  7 +--
 src/replica/replica_init.cpp                       | 64 ++++++++++------------
 src/replica/replica_learn.cpp                      |  7 +--
 src/replica/replica_restore.cpp                    |  4 +-
 .../storage/simple_kv/simple_kv.server.impl.cpp    |  6 +-
 src/reporter/pegasus_counter_reporter.cpp          |  4 +-
 src/runtime/rpc/asio_rpc_session.cpp               | 10 ++--
 src/runtime/rpc/network.cpp                        | 23 ++++----
 src/runtime/rpc/rpc_engine.cpp                     | 12 ++--
 src/runtime/task/task.h                            |  9 +--
 src/server/available_detector.cpp                  | 22 ++++----
 src/server/info_collector.cpp                      |  4 +-
 src/server/result_writer.cpp                       |  8 +--
 src/test/kill_test/data_verifier.cpp               | 20 +++----
 src/test/kill_test/kill_testor.cpp                 | 14 ++---
 src/utils/api_utilities.h                          |  1 -
 src/utils/token_bucket_throttling_controller.cpp   |  6 +-
 src/zookeeper/lock_struct.cpp                      |  2 +-
 src/zookeeper/zookeeper_session.cpp                | 16 +++---
 48 files changed, 287 insertions(+), 358 deletions(-)

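The pattern applied throughout the hunks below is mechanical: printf-style format strings, which need explicit .c_str()/to_string() conversions and PRIx64/PRId64 width macros, become fmt-style "{}" placeholders that format std::string, error_code, rpc_address and gpid arguments directly. A rough before/after sketch of the conversion (the variables here are invented for illustration; only the LOG_DEBUG and LOG_DEBUG_F macros come from the dsn logging utilities):

    // hypothetical call site, before the refactoring: printf-style formatting
    std::string path = "/tmp/replica.log";   // illustrative value only
    dsn::error_code err = dsn::ERR_OK;
    uint64_t task_id = 0x1234;
    LOG_DEBUG("open %s failed, err = %s, task_id = %016" PRIx64,
              path.c_str(), err.to_string(), task_id);

    // after: fmt-style placeholders, arguments formatted directly;
    // "{:#018x}" stands in for the old "%016" PRIx64 hex/width spec
    LOG_DEBUG_F("open {} failed, err = {}, task_id = {:#018x}", path, err, task_id);
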
diff --git a/src/aio/disk_engine.cpp b/src/aio/disk_engine.cpp
index e69f5c982..521713f46 100644
--- a/src/aio/disk_engine.cpp
+++ b/src/aio/disk_engine.cpp
@@ -236,10 +236,10 @@ void disk_engine::process_write(aio_task *aio, uint64_t sz)
 void disk_engine::complete_io(aio_task *aio, error_code err, uint64_t bytes)
 {
     if (err != ERR_OK) {
-        LOG_DEBUG("disk operation failure with code %s, err = %s, aio_task_id 
= %016" PRIx64,
-                  aio->spec().name.c_str(),
-                  err.to_string(),
-                  aio->id());
+        LOG_DEBUG_F("disk operation failure with code {}, err = {}, 
aio_task_id = {:#018x}",
+                    aio->spec().name,
+                    err,
+                    aio->id());
     }
 
     // batching
diff --git a/src/block_service/fds/fds_service.cpp b/src/block_service/fds/fds_service.cpp
index 9a89a1726..5eb7161b0 100644
--- a/src/block_service/fds/fds_service.cpp
+++ b/src/block_service/fds/fds_service.cpp
@@ -500,7 +500,7 @@ error_code fds_file_object::get_content(uint64_t pos,
                                _fds_path,
                                pos + transfered_bytes,
                                length - transfered_bytes);
-            LOG_DEBUG("get object from fds succeed, remote_file(%s)", 
_fds_path.c_str());
+            LOG_DEBUG_F("get object from fds succeed, remote_file({})", 
_fds_path);
             std::istream &is = obj->objectContent();
             transfered_bytes += utils::copy_stream(is, os, PIECE_SIZE);
             err = ERR_OK;
diff --git a/src/block_service/local/local_service.cpp b/src/block_service/local/local_service.cpp
index 5a0f8222e..566b3e6b6 100644
--- a/src/block_service/local/local_service.cpp
+++ b/src/block_service/local/local_service.cpp
@@ -186,7 +186,7 @@ dsn::task_ptr local_service::create_file(const create_file_request &req,
         if (utils::filesystem::file_exists(file_path) &&
             utils::filesystem::file_exists(meta_file_path)) {
 
-            LOG_DEBUG("file(%s) already exist", file_path.c_str());
+            LOG_DEBUG_F("file({}) already exist", file_path);
             resp.err = f->load_metadata();
         }
 
@@ -332,7 +332,7 @@ dsn::task_ptr local_file_object::write(const write_request &req,
         }
 
         if (resp.err == ERR_OK) {
-            LOG_DEBUG("start write file, file = %s", file_name().c_str());
+            LOG_DEBUG_F("start write file, file = {}", file_name());
 
             std::ofstream fout(file_name(), std::ifstream::out | std::ifstream::trunc);
             if (!fout.is_open()) {
@@ -386,7 +386,7 @@ dsn::task_ptr local_file_object::read(const read_request &req,
                     total_sz = req.remote_length;
                 }
 
-                LOG_DEBUG("read file(%s), size = %ld", file_name().c_str(), 
total_sz);
+                LOG_DEBUG_F("read file({}), size = {}", file_name(), total_sz);
                 std::string buf;
                 buf.resize(total_sz + 1);
                 std::ifstream fin(file_name(), std::ifstream::in);
@@ -439,9 +439,9 @@ dsn::task_ptr local_file_object::upload(const upload_request &req,
         }
 
         if (resp.err == ERR_OK) {
-            LOG_DEBUG("start to transfer from src_file(%s) to des_file(%s)",
-                      req.input_local_name.c_str(),
-                      file_name().c_str());
+            LOG_DEBUG_F("start to transfer from src_file({}) to dst_file({})",
+                        req.input_local_name,
+                        file_name());
             int64_t total_sz = 0;
             char buf[max_length] = {'\0'};
             while (!fin.eof()) {
@@ -449,8 +449,7 @@ dsn::task_ptr local_file_object::upload(const upload_request &req,
                 total_sz += fin.gcount();
                 fout.write(buf, fin.gcount());
             }
-            LOG_DEBUG(
-                "finish upload file, file = %s, total_size = %d", 
file_name().c_str(), total_sz);
+            LOG_DEBUG_F("finish upload file, file = {}, total_size = {}", 
file_name(), total_sz);
             fout.close();
             fin.close();
 
@@ -527,9 +526,8 @@ dsn::task_ptr local_file_object::download(const download_request &req,
             }
 
             if (resp.err == ERR_OK) {
-                LOG_DEBUG("start to transfer, src_file(%s), des_file(%s)",
-                          file_name().c_str(),
-                          target_file.c_str());
+                LOG_DEBUG_F(
+                    "start to transfer, src_file({}), dst_file({})", 
file_name(), target_file);
                 int64_t total_sz = 0;
                 char buf[max_length] = {'\0'};
                 while (!fin.eof()) {
@@ -537,8 +535,7 @@ dsn::task_ptr local_file_object::download(const download_request &req,
                     total_sz += fin.gcount();
                     fout.write(buf, fin.gcount());
                 }
-                LOG_DEBUG(
-                    "finish download file(%s), total_size = %d", 
target_file.c_str(), total_sz);
+                LOG_DEBUG_F("finish download file({}), total_size = {}", 
target_file, total_sz);
                 fout.close();
                 fin.close();
                 resp.downloaded_size = static_cast<uint64_t>(total_sz);
diff --git a/src/client/partition_resolver.h b/src/client/partition_resolver.h
index 37ebda3b3..bd01230ea 100644
--- a/src/client/partition_resolver.h
+++ b/src/client/partition_resolver.h
@@ -73,6 +73,8 @@ public:
 
     dsn::rpc_address get_meta_server() const { return _meta_server; }
 
+    const char *log_prefix() const { return _app_name.c_str(); }
+
 protected:
     partition_resolver(rpc_address meta_server, const char *app_name)
         : _app_name(app_name), _meta_server(meta_server)
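
The log_prefix() accessor added above is what the LOG_DEBUG_PREFIX call sites later in this patch rely on: instead of hand-writing a "%s.client:"-style prefix into every format string, the macro picks the prefix up from log_prefix(). The macro itself is defined in the logging utilities rather than in this diff; as an assumption, it can be pictured roughly as

    // sketch only: assumed shape of the prefix-aware macro, not taken from this patch
    #define LOG_DEBUG_PREFIX(fmt_, ...) LOG_DEBUG_F("[{}] " fmt_, log_prefix(), ##__VA_ARGS__)

so any class exposing log_prefix(), as partition_resolver does here and proxy_session does below, gets its prefix prepended automatically.
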
diff --git a/src/client/partition_resolver_simple.cpp b/src/client/partition_resolver_simple.cpp
index bcd631dbe..139f5fc55 100644
--- a/src/client/partition_resolver_simple.cpp
+++ b/src/client/partition_resolver_simple.cpp
@@ -105,7 +105,7 @@ partition_resolver_simple::~partition_resolver_simple()
 
 void partition_resolver_simple::clear_all_pending_requests()
 {
-    LOG_DEBUG("%s.client: clear all pending tasks", _app_name.c_str());
+    LOG_DEBUG_PREFIX("clear all pending tasks");
     zauto_lock l(_requests_lock);
     // clear _pending_requests
     for (auto &pc : _pending_requests) {
@@ -230,11 +230,8 @@ DEFINE_TASK_CODE_RPC(RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX,
 
 task_ptr partition_resolver_simple::query_config(int partition_index, int timeout_ms)
 {
-    LOG_DEBUG("%s.client: start query config, gpid = %d.%d, timeout_ms = %d",
-              _app_name.c_str(),
-              _app_id,
-              partition_index,
-              timeout_ms);
+    LOG_DEBUG_PREFIX(
+        "start query config, gpid = {}.{}, timeout_ms = {}", _app_id, 
partition_index, timeout_ms);
     task_spec *sp = task_spec::get(RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX);
     if (timeout_ms >= sp->rpc_timeout_milliseconds)
         timeout_ms = 0;
@@ -292,13 +289,10 @@ void partition_resolver_simple::query_config_reply(error_code err,
             for (auto it = resp.partitions.begin(); it != resp.partitions.end(); ++it) {
                 auto &new_config = *it;
 
-                LOG_DEBUG("%s.client: query config reply, gpid = %d.%d, ballot 
= %" PRId64
-                          ", primary = %s",
-                          _app_name.c_str(),
-                          new_config.pid.get_app_id(),
-                          new_config.pid.get_partition_index(),
-                          new_config.ballot,
-                          new_config.primary.to_string());
+                LOG_DEBUG_PREFIX("query config reply, gpid = {}, ballot = {}, 
primary = {}",
+                                 new_config.pid,
+                                 new_config.ballot,
+                                 new_config.primary);
 
                 auto it2 = _config_cache.find(new_config.pid.get_partition_index());
                 if (it2 == _config_cache.end()) {
diff --git a/src/failure_detector/failure_detector.cpp b/src/failure_detector/failure_detector.cpp
index 69164e284..bd89323bd 100644
--- a/src/failure_detector/failure_detector.cpp
+++ b/src/failure_detector/failure_detector.cpp
@@ -117,7 +117,7 @@ void failure_detector::register_master(::dsn::rpc_address target)
 
     auto ret = _masters.insert(std::make_pair(target, record));
     if (ret.second) {
-        LOG_DEBUG("register master[%s] successfully", target.to_string());
+        LOG_DEBUG_F("register master[{}] successfully", target);
         setup_timer = true;
     } else {
         // active the beacon again in case previously local node is not in target's allow list
@@ -125,7 +125,7 @@ void failure_detector::register_master(::dsn::rpc_address target)
             ret.first->second.rejected = false;
             setup_timer = true;
         }
-        LOG_DEBUG("master[%s] already registered", target.to_string());
+        LOG_DEBUG_F("master[{}] already registered", target);
     }
 
     if (setup_timer) {
@@ -518,9 +518,9 @@ void failure_detector::register_worker(::dsn::rpc_address target, bool is_connec
 
     auto ret = _workers.insert(std::make_pair(target, record));
     if (ret.second) {
-        LOG_DEBUG("register worker[%s] successfully", target.to_string());
+        LOG_DEBUG_F("register worker[{}] successfully", target);
     } else {
-        LOG_DEBUG("worker[%s] already registered", target.to_string());
+        LOG_DEBUG_F("worker[{}] already registered", target);
     }
 }
 
@@ -539,9 +539,7 @@ bool failure_detector::unregister_worker(::dsn::rpc_address node)
         ret = true;
     }
 
-    LOG_DEBUG("unregister worker[%s] successfully, removed entry count is %u",
-              node.to_string(),
-              (uint32_t)count);
+    LOG_DEBUG_F("unregister worker[{}] successfully, removed entry count is 
{}", node, count);
 
     return ret;
 }
diff --git a/src/failure_detector/test/failure_detector.cpp b/src/failure_detector/test/failure_detector.cpp
index 1fb19f0bd..b7b497150 100644
--- a/src/failure_detector/test/failure_detector.cpp
+++ b/src/failure_detector/test/failure_detector.cpp
@@ -68,7 +68,7 @@ protected:
         if (_send_ping_switch)
             failure_detector::send_beacon(node, time);
         else {
-            LOG_DEBUG("ignore send beacon, to node[%s], time[%" PRId64 "]", 
node.to_string(), time);
+            LOG_DEBUG_F("ignore send beacon, to node[{}], time[{}]", node, 
time);
         }
     }
 
@@ -118,10 +118,10 @@ public:
         if (_response_ping_switch)
             meta_server_failure_detector::on_ping(beacon, reply);
         else {
-            LOG_DEBUG("ignore on ping, beacon msg, time[%" PRId64 "], 
from[%s], to[%s]",
-                      beacon.time,
-                      beacon.from_addr.to_string(),
-                      beacon.to_addr.to_string());
+            LOG_DEBUG_F("ignore on ping, beacon msg, time[{}], from[{}], 
to[{}]",
+                        beacon.time,
+                        beacon.from_addr,
+                        beacon.to_addr);
         }
     }
 
@@ -180,9 +180,9 @@ public:
 
     void on_master_config(const config_master_message &request, bool &response)
     {
-        LOG_DEBUG("master config: request:%s, type:%s",
-                  request.master.to_string(),
-                  request.is_register ? "reg" : "unreg");
+        LOG_DEBUG_F("master config, request: {}, type: {}",
+                    request.master,
+                    request.is_register ? "reg" : "unreg");
         if (request.is_register)
             _worker_fd->register_master(request.master);
         else
@@ -220,7 +220,7 @@ public:
         }
 
         _master_fd->start(1, 1, 9, 10, use_allow_list);
-        LOG_DEBUG("%s", 
_master_fd->get_allow_list(std::vector<std::string>{}).c_str());
+        LOG_DEBUG_F("{}", _master_fd->get_allow_list({}));
         ++started_apps;
 
         return ERR_OK;
@@ -507,7 +507,7 @@ TEST(fd, old_master_died)
 
     worker->fd()->when_disconnected([](const std::vector<rpc_address> &masters_list) {
         ASSERT_EQ(masters_list.size(), 1);
-        LOG_DEBUG("disconnect from master: %s", masters_list[0].to_string());
+        LOG_DEBUG_F("disconnect from master: {}", masters_list[0]);
     });
 
     /*first let's stop the old master*/
diff --git a/src/meta/load_balance_policy.cpp b/src/meta/load_balance_policy.cpp
index cc1f8e6da..5738ce4ca 100644
--- a/src/meta/load_balance_policy.cpp
+++ b/src/meta/load_balance_policy.cpp
@@ -42,7 +42,7 @@ void dump_disk_load(app_id id, const rpc_address &node, bool only_primary, const
         load_string << kv.first << ": " << kv.second << std::endl;
     }
     load_string << ">>>>>>>>>>";
-    LOG_DEBUG("%s", load_string.str().c_str());
+    LOG_DEBUG_F("{}", load_string.str());
 }
 
 bool calc_disk_load(node_mapper &nodes,
@@ -57,10 +57,7 @@ bool calc_disk_load(node_mapper &nodes,
     CHECK_NOTNULL(ns, "can't find node({}) from node_state", node.to_string());
 
     auto add_one_replica_to_disk_load = [&](const gpid &pid) {
-        LOG_DEBUG("add gpid(%d.%d) to node(%s) disk load",
-                  pid.get_app_id(),
-                  pid.get_partition_index(),
-                  node.to_string());
+        LOG_DEBUG_F("add gpid({}) to node({}) disk load", pid, node);
         const config_context &cc = *get_config_context(apps, pid);
         auto iter = cc.find_from_serving(node);
         if (iter == cc.serving.end()) {
diff --git a/src/meta/meta_backup_service.cpp b/src/meta/meta_backup_service.cpp
index 36d5ff1e4..1fb6faa7f 100644
--- a/src/meta/meta_backup_service.cpp
+++ b/src/meta/meta_backup_service.cpp
@@ -968,7 +968,7 @@ void policy_context::issue_gc_backup_info_task_unlocked()
     } else {
         // there is no extra backup to gc, we just issue a new task to call
         // issue_gc_backup_info_task_unlocked later
-        LOG_DEBUG("%s: no need to gc backup info, start it later", 
_policy.policy_name.c_str());
+        LOG_DEBUG_F("{}: no need to gc backup info, start it later", 
_policy.policy_name);
         tasking::create_task(LPC_DEFAULT_CALLBACK, &_tracker, [this]() {
             zauto_lock l(_lock);
             issue_gc_backup_info_task_unlocked();
@@ -1050,7 +1050,7 @@ backup_service::backup_service(meta_service *meta_svc,
 
 void backup_service::start_create_policy_meta_root(dsn::task_ptr callback)
 {
-    LOG_DEBUG("create policy meta root(%s) on remote_storage", 
_policy_meta_root.c_str());
+    LOG_DEBUG_F("create policy meta root({}) on remote_storage", 
_policy_meta_root);
     _meta_svc->get_remote_storage()->create_node(
         _policy_meta_root, LPC_DEFAULT_CALLBACK, [this, callback](dsn::error_code err) {
             if (err == dsn::ERR_OK || err == ERR_NODE_ALREADY_EXIST) {
@@ -1114,7 +1114,7 @@ error_code backup_service::sync_policies_from_remote_storage()
     auto init_backup_info = [this, &err, &tracker](const std::string &policy_name) {
         auto after_get_backup_info = [this, &err, policy_name](error_code ec, const blob &value) {
             if (ec == ERR_OK) {
-                LOG_DEBUG("sync a backup string(%s) from remote storage", 
value.data());
+                LOG_DEBUG_F("sync a backup string({}) from remote storage", 
value.data());
                 backup_info tbackup_info;
                 dsn::json::json_forwarder<backup_info>::decode(value, 
tbackup_info);
 
diff --git a/src/meta/meta_data.cpp b/src/meta/meta_data.cpp
index 47a0dbe41..e41315401 100644
--- a/src/meta/meta_data.cpp
+++ b/src/meta/meta_data.cpp
@@ -250,10 +250,8 @@ void proposal_actions::track_current_learner(const dsn::rpc_address &node, const
                          dsn::enum_to_string(info.status));
                 learning_progress_abnormal_detected = true;
             } else {
-                LOG_DEBUG("%d.%d: ignore abnormal status of %s, perhaps learn 
not start",
-                          info.pid.get_app_id(),
-                          info.pid.get_partition_index(),
-                          node.to_string());
+                LOG_DEBUG_F(
+                    "{}: ignore abnormal status of {}, perhaps learn not 
start", info.pid, node);
             }
         } else if (info.status == partition_status::PS_POTENTIAL_SECONDARY) {
             if (current_learner.ballot > info.ballot ||
diff --git a/src/meta/meta_server_failure_detector.cpp b/src/meta/meta_server_failure_detector.cpp
index 68d124a19..73f0a79b7 100644
--- a/src/meta/meta_server_failure_detector.cpp
+++ b/src/meta/meta_server_failure_detector.cpp
@@ -208,9 +208,8 @@ bool meta_server_failure_detector::update_stability_stat(const fd::beacon_msg &b
     } else {
         worker_stability &w = iter->second;
         if (beacon.start_time == w.last_start_time_ms) {
-            LOG_DEBUG("%s isn't restarted, last_start_time(%lld)",
-                      beacon.from_addr.to_string(),
-                      w.last_start_time_ms);
+            LOG_DEBUG_F(
+                "{} isn't restarted, last_start_time({})", beacon.from_addr, 
w.last_start_time_ms);
             if (dsn_now_ms() - w.last_start_time_ms >=
                     _fd_opts->stable_rs_min_running_seconds * 1000 &&
                 w.unstable_restart_count > 0) {
diff --git a/src/meta/meta_service.cpp b/src/meta/meta_service.cpp
index 8e2a80c5c..b25dce51f 100644
--- a/src/meta/meta_service.cpp
+++ b/src/meta/meta_service.cpp
@@ -487,7 +487,7 @@ int meta_service::check_leader(dsn::message_ex *req, dsn::rpc_address *forward_a
             return -1;
         }
 
-        LOG_DEBUG("leader address: %s", leader.to_string());
+        LOG_DEBUG_F("leader address: {}", leader);
         if (!leader.is_invalid()) {
             dsn_rpc_forward(req, leader);
             return 0;
diff --git a/src/meta/meta_service.h b/src/meta/meta_service.h
index f41819cb9..e89aacaaa 100644
--- a/src/meta/meta_service.h
+++ b/src/meta/meta_service.h
@@ -352,7 +352,7 @@ int meta_service::check_leader(TRpcHolder rpc, rpc_address *forward_address)
             return -1;
         }
 
-        LOG_DEBUG("leader address: %s", leader.to_string());
+        LOG_DEBUG_F("leader address: {}", leader);
         if (!leader.is_invalid()) {
             rpc.forward(leader);
             return 0;
diff --git a/src/meta/meta_state_service_simple.cpp b/src/meta/meta_state_service_simple.cpp
index fdc7af2bc..c06009191 100644
--- a/src/meta/meta_state_service_simple.cpp
+++ b/src/meta/meta_state_service_simple.cpp
@@ -198,7 +198,7 @@ error_code meta_state_service_simple::set_data_internal(const std::string &node,
 error_code meta_state_service_simple::apply_transaction(
     const std::shared_ptr<meta_state_service::transaction_entries> &t_entries)
 {
-    LOG_DEBUG("internal operation after logged");
+    LOG_DEBUG_F("internal operation after logged");
     simple_transaction_entries *entries =
         dynamic_cast<simple_transaction_entries *>(t_entries.get());
     CHECK_NOTNULL(entries, "invalid input parameter");
diff --git a/src/meta/meta_state_service_zookeeper.cpp b/src/meta/meta_state_service_zookeeper.cpp
index f74f8641d..14627ab3f 100644
--- a/src/meta/meta_state_service_zookeeper.cpp
+++ b/src/meta/meta_state_service_zookeeper.cpp
@@ -206,7 +206,7 @@ task_ptr meta_state_service_zookeeper::create_node(const std::string &node,
 {
     error_code_future_ptr tsk(new error_code_future(cb_code, cb_create, 0));
     tsk->set_tracker(tracker);
-    LOG_DEBUG("call create, node(%s)", node.c_str());
+    LOG_DEBUG_F("call create, node({})", node);
     VISIT_INIT(tsk, zookeeper_session::ZOO_OPERATION::ZOO_CREATE, node);
     input->_value = value;
     input->_flags = 0;
@@ -223,7 +223,7 @@ task_ptr meta_state_service_zookeeper::submit_transaction(
 {
     error_code_future_ptr tsk(new error_code_future(cb_code, cb_transaction, 0));
     tsk->set_tracker(tracker);
-    LOG_DEBUG("call submit batch");
+    LOG_DEBUG_F("call submit batch");
     zookeeper_session::zoo_opcontext *op = zookeeper_session::create_context();
     zookeeper_session::zoo_input *input = &op->_input;
     op->_callback_function = std::bind(&meta_state_service_zookeeper::visit_zookeeper_internal,
@@ -246,7 +246,7 @@ task_ptr meta_state_service_zookeeper::delete_empty_node(const std::string &node
 {
     error_code_future_ptr tsk(new error_code_future(cb_code, cb_delete, 0));
     tsk->set_tracker(tracker);
-    LOG_DEBUG("call delete, node(%s)", node.c_str());
+    LOG_DEBUG_F("call delete, node({})", node);
     VISIT_INIT(tsk, zookeeper_session::ZOO_OPERATION::ZOO_DELETE, node);
     _session->visit(op);
     return tsk;
@@ -313,7 +313,7 @@ task_ptr meta_state_service_zookeeper::get_data(const std::string &node,
 {
     err_value_future_ptr tsk(new err_value_future(cb_code, cb_get_data, 0));
     tsk->set_tracker(tracker);
-    LOG_DEBUG("call get, node(%s)", node.c_str());
+    LOG_DEBUG_F("call get, node({})", node);
     VISIT_INIT(tsk, zookeeper_session::ZOO_OPERATION::ZOO_GET, node);
     input->_is_set_watch = 0;
     _session->visit(op);
@@ -328,7 +328,7 @@ task_ptr meta_state_service_zookeeper::set_data(const std::string &node,
 {
     error_code_future_ptr tsk(new error_code_future(cb_code, cb_set_data, 0));
     tsk->set_tracker(tracker);
-    LOG_DEBUG("call set, node(%s)", node.c_str());
+    LOG_DEBUG_F("call set, node({})", node);
     VISIT_INIT(tsk, zookeeper_session::ZOO_OPERATION::ZOO_SET, node);
 
     input->_value = value;
@@ -343,7 +343,7 @@ task_ptr meta_state_service_zookeeper::node_exist(const std::string &node,
 {
     error_code_future_ptr tsk(new error_code_future(cb_code, cb_exist, 0));
     tsk->set_tracker(tracker);
-    LOG_DEBUG("call node_exist, node(%s)", node.c_str());
+    LOG_DEBUG_F("call node_exist, node({})", node);
     VISIT_INIT(tsk, zookeeper_session::ZOO_OPERATION::ZOO_EXISTS, node);
     input->_is_set_watch = 0;
     _session->visit(op);
@@ -357,7 +357,7 @@ task_ptr meta_state_service_zookeeper::get_children(const std::string &node,
 {
     err_stringv_future_ptr tsk(new err_stringv_future(cb_code, cb_get_children, 0));
     tsk->set_tracker(tracker);
-    LOG_DEBUG("call get children, node(%s)", node.c_str());
+    LOG_DEBUG_F("call get children, node({})", node);
     VISIT_INIT(tsk, zookeeper_session::ZOO_OPERATION::ZOO_GETCHILDREN, node);
     input->_is_set_watch = 0;
     _session->visit(op);
@@ -388,8 +388,8 @@ void meta_state_service_zookeeper::visit_zookeeper_internal(ref_this,
 {
     zookeeper_session::zoo_opcontext *op =
         reinterpret_cast<zookeeper_session::zoo_opcontext *>(result);
-    LOG_DEBUG(
-        "visit zookeeper internal: ans(%s), call type(%d)", 
zerror(op->_output.error), op->_optype);
+    LOG_DEBUG_F(
+        "visit zookeeper internal: ans({}), call type({})", 
zerror(op->_output.error), op->_optype);
 
     switch (op->_optype) {
     case zookeeper_session::ZOO_OPERATION::ZOO_CREATE:
diff --git a/src/meta/server_state.cpp b/src/meta/server_state.cpp
index 2c81cb881..5a8fbe1ed 100644
--- a/src/meta/server_state.cpp
+++ b/src/meta/server_state.cpp
@@ -283,10 +283,10 @@ void server_state::process_one_partition(std::shared_ptr<app_state> &app)
 {
     int ans = --app->helpers->partitions_in_progress;
     if (ans > 0) {
-        LOG_DEBUG("app(%s) in status %s, can't transfer to stable state as 
some partition is in "
-                  "progressing",
-                  app->get_logname(),
-                  enum_to_string(app->status));
+        LOG_DEBUG_F("app({}) in status {}, can't transfer to stable state as 
some partition is in "
+                    "progressing",
+                    app->get_logname(),
+                    enum_to_string(app->status));
         return;
     } else if (ans == 0) {
         transition_staging_state(app);
@@ -858,10 +858,7 @@ void server_state::on_config_sync(configuration_query_by_node_rpc rpc)
             // the app is deleted but not expired, we need to ignore it
             // if the app is deleted and expired, we need to gc it
             for (const replica_info &rep : replicas) {
-                LOG_DEBUG("receive stored replica from %s, pid(%d.%d)",
-                          request.node.to_string(),
-                          rep.pid.get_app_id(),
-                          rep.pid.get_partition_index());
+                LOG_DEBUG_F("receive stored replica from {}, pid({})", 
request.node, rep.pid);
                 std::shared_ptr<app_state> app = get_app(rep.pid.get_app_id());
                 if (app == nullptr || rep.pid.get_partition_index() >= 
app->partition_count) {
                     // This app has garbage partition after cancel split, the 
canceled child
@@ -1014,8 +1011,7 @@ void server_state::init_app_partition_node(std::shared_ptr<app_state> &app,
                                            task_ptr callback)
 {
     auto on_create_app_partition = [this, pidx, app, callback](error_code ec) mutable {
-        LOG_DEBUG(
-            "create partition node: gpid(%d.%d), result: %s", app->app_id, pidx, ec.to_string());
+        LOG_DEBUG_F("create partition node: gpid({}.{}), result: {}", app->app_id, pidx, ec);
         if (ERR_OK == ec || ERR_NODE_ALREADY_EXIST == ec) {
             {
                 zauto_write_lock l(_lock);
@@ -1054,7 +1050,7 @@ void server_state::do_app_create(std::shared_ptr<app_state> &app)
 {
     auto on_create_app_root = [this, app](error_code ec) mutable {
         if (ERR_OK == ec || ERR_NODE_ALREADY_EXIST == ec) {
-            LOG_DEBUG("create app(%s) on storage service ok", 
app->get_logname());
+            LOG_DEBUG_F("create app({}) on storage service ok", 
app->get_logname());
             for (unsigned int i = 0; i != app->partition_count; ++i) {
                 init_app_partition_node(app, i, nullptr);
             }
@@ -1183,7 +1179,7 @@ void server_state::do_app_drop(std::shared_ptr<app_state> &app)
                 drop_partition(app, i);
             }
         } else if (ERR_TIMEOUT == ec) {
-            LOG_DEBUG("drop app(%s) prepare timeout, continue to drop later", 
app->get_logname());
+            LOG_DEBUG_F("drop app({}) prepare timeout, continue to drop 
later", app->get_logname());
             tasking::enqueue(LPC_META_STATE_HIGH,
                              tracker(),
                              std::bind(&server_state::do_app_drop, this, app),
@@ -1408,7 +1404,7 @@ void server_state::recall_app(dsn::message_ex *msg)
 void server_state::list_apps(const configuration_list_apps_request &request,
                              configuration_list_apps_response &response)
 {
-    LOG_DEBUG("list app request, status(%d)", request.status);
+    LOG_DEBUG_F("list app request, status({})", request.status);
     zauto_read_lock l(_lock);
     for (auto &kv : _all_apps) {
         app_state &app = *(kv.second);
@@ -2009,7 +2005,7 @@ void server_state::on_partition_node_dead(std::shared_ptr<app_state> &app,
 
 void server_state::on_change_node_state(rpc_address node, bool is_alive)
 {
-    LOG_DEBUG("change node(%s) state to %s", node.to_string(), is_alive ? 
"alive" : "dead");
+    LOG_DEBUG_F("change node({}) state to {}", node, is_alive ? "alive" : 
"dead");
     zauto_write_lock l(_lock);
     if (!is_alive) {
         auto iter = _nodes.find(node);
@@ -2489,10 +2485,7 @@ bool server_state::check_all_partitions()
                 configuration_proposal_action action;
                 pc_status s = _meta_svc->get_partition_guardian()->cure(
                     {&_all_apps, &_nodes}, pc.pid, action);
-                LOG_DEBUG("gpid(%d.%d) is in status(%s)",
-                          pc.pid.get_app_id(),
-                          pc.pid.get_partition_index(),
-                          enum_to_string(s));
+                LOG_DEBUG_F("gpid({}) is in status({})", pc.pid, 
enum_to_string(s));
                 if (pc_status::healthy != s) {
                     if (action.type != config_type::CT_INVALID) {
                         if (action.type == config_type::CT_ADD_SECONDARY ||
diff --git a/src/meta/test/balancer_simulator/balancer_simulator.cpp b/src/meta/test/balancer_simulator/balancer_simulator.cpp
index 2efd6b2aa..0b8b830e9 100644
--- a/src/meta/test/balancer_simulator/balancer_simulator.cpp
+++ b/src/meta/test/balancer_simulator/balancer_simulator.cpp
@@ -164,7 +164,7 @@ void greedy_balancer_perfect_move_primary()
     migration_list ml;
 
     glb.check({&apps, &nodes}, ml);
-    LOG_DEBUG("balance checker operation count = %d", ml.size());
+    LOG_DEBUG_F("balance checker operation count = {}", ml.size());
     int i = 0;
 
     while (glb.balance({&apps, &nodes}, ml)) {
@@ -175,7 +175,7 @@ void greedy_balancer_perfect_move_primary()
             }
         }
         glb.check({&apps, &nodes}, ml);
-        LOG_DEBUG("round %d: balance checker operation count = %d", ++i, 
ml.size());
+        LOG_DEBUG_F("round {}: balance checker operation count = {}", ++i, 
ml.size());
     }
 }
 
diff --git a/src/meta/test/balancer_validator.cpp b/src/meta/test/balancer_validator.cpp
index 602ab87df..1533da7cc 100644
--- a/src/meta/test/balancer_validator.cpp
+++ b/src/meta/test/balancer_validator.cpp
@@ -239,25 +239,25 @@ void meta_service_test_app::balancer_validator()
     migration_list ml;
 
     for (auto &iter : nodes) {
-        LOG_DEBUG("node(%s) have %d primaries, %d partitions",
-                  iter.first.to_string(),
-                  iter.second.primary_count(),
-                  iter.second.partition_count());
+        LOG_DEBUG_F("node({}) have {} primaries, {} partitions",
+                    iter.first,
+                    iter.second.primary_count(),
+                    iter.second.partition_count());
     }
 
     // iterate 1000000 times
     for (int i = 0; i < 1000000 && glb.balance({&apps, &nodes}, ml); ++i) {
-        LOG_DEBUG("the %dth round of balancer", i);
+        LOG_DEBUG_F("the {}th round of balancer", i);
         migration_check_and_apply(apps, nodes, ml, &manager);
         glb.check({&apps, &nodes}, ml);
-        LOG_DEBUG("balance checker operation count = %d", ml.size());
+        LOG_DEBUG_F("balance checker operation count = {}", ml.size());
     }
 
     for (auto &iter : nodes) {
-        LOG_DEBUG("node(%s) have %d primaries, %d partitions",
-                  iter.first.to_string(),
-                  iter.second.primary_count(),
-                  iter.second.partition_count());
+        LOG_DEBUG_F("node({}) have {} primaries, {} partitions",
+                    iter.first,
+                    iter.second.primary_count(),
+                    iter.second.partition_count());
     }
 
     std::shared_ptr<app_state> &the_app = apps[1];
@@ -345,10 +345,10 @@ void meta_service_test_app::balance_config_file()
 
         // iterate 1000 times
         for (int i = 0; i < 1000 && lb->balance({&apps, &nodes}, ml); ++i) {
-            LOG_DEBUG("the %dth round of balancer", i);
+            LOG_DEBUG_F("the {}th round of balancer", i);
             migration_check_and_apply(apps, nodes, ml, nullptr);
             lb->check({&apps, &nodes}, ml);
-            LOG_DEBUG("balance checker operation count = %d", ml.size());
+            LOG_DEBUG_F("balance checker operation count = {}", ml.size());
         }
     }
 }
diff --git a/src/meta/test/misc/misc.cpp b/src/meta/test/misc/misc.cpp
index 8b3a38374..53ffd94bb 100644
--- a/src/meta/test/misc/misc.cpp
+++ b/src/meta/test/misc/misc.cpp
@@ -163,7 +163,7 @@ void generate_apps(/*out*/ dsn::replication::app_mapper &mapper,
         if (generate_serving_info) {
             generate_app_serving_replica_info(the_app, disks_per_node);
         }
-        LOG_DEBUG("generated app, partitions(%d)", info.partition_count);
+        LOG_DEBUG_F("generated app, partitions({})", info.partition_count);
         mapper.emplace(the_app->app_id, the_app);
     }
 }
@@ -201,7 +201,7 @@ void generate_node_fs_manager(const app_mapper &apps,
                      cc.find_from_serving(ns.addr())->disk_tag.c_str(),
                      pid.get_app_id(),
                      pid.get_partition_index());
-            LOG_DEBUG("concat pid_dir(%s) of node(%s)", pid_dir, 
ns.addr().to_string());
+            LOG_DEBUG_F("concat pid_dir({}) of node({})", pid_dir, ns.addr());
             manager.add_replica(pid, pid_dir);
             return true;
         });
@@ -356,10 +356,7 @@ void migration_check_and_apply(app_mapper &apps,
     int i = 0;
     for (auto kv = ml.begin(); kv != ml.end(); ++kv) {
         std::shared_ptr<configuration_balancer_request> &proposal = kv->second;
-        LOG_DEBUG("the %dth round of proposal, gpid(%d.%d)",
-                  i++,
-                  proposal->gpid.get_app_id(),
-                  proposal->gpid.get_partition_index());
+        LOG_DEBUG_F("the {}th round of proposal, gpid({})", i++, 
proposal->gpid);
         std::shared_ptr<app_state> &the_app = 
apps.find(proposal->gpid.get_app_id())->second;
 
         CHECK_EQ(proposal->gpid.get_app_id(), the_app->app_id);
@@ -376,11 +373,11 @@ void migration_check_and_apply(app_mapper &apps,
 
         for (unsigned int j = 0; j < proposal->action_list.size(); ++j) {
             configuration_proposal_action &act = proposal->action_list[j];
-            LOG_DEBUG("the %dth round of action, type: %s, node: %s, target: 
%s",
-                      j,
-                      dsn::enum_to_string(act.type),
-                      act.node.to_string(),
-                      act.target.to_string());
+            LOG_DEBUG_F("the {}th round of action, type: {}, node: {}, target: 
{}",
+                        j,
+                        dsn::enum_to_string(act.type),
+                        act.node,
+                        act.target);
             proposal_action_check_and_apply(act, proposal->gpid, apps, nodes, manager);
         }
     }
diff --git a/src/nfs/nfs_server_impl.cpp b/src/nfs/nfs_server_impl.cpp
index 5a90bf4d2..645ef2d8a 100644
--- a/src/nfs/nfs_server_impl.cpp
+++ b/src/nfs/nfs_server_impl.cpp
@@ -74,8 +74,6 @@ nfs_service_impl::nfs_service_impl() : ::dsn::serverlet<nfs_service_impl>("nfs")
 void nfs_service_impl::on_copy(const ::dsn::service::copy_request &request,
                                ::dsn::rpc_replier<::dsn::service::copy_response> &reply)
 {
-    // LOG_DEBUG(">>> on call RPC_COPY end, exec RPC_NFS_COPY");
-
     std::string file_path =
         dsn::utils::filesystem::path_combine(request.source_dir, request.file_name);
     disk_file *dfile = nullptr;
@@ -100,10 +98,8 @@ void nfs_service_impl::on_copy(const ::dsn::service::copy_request &request,
         }
     }
 
-    LOG_DEBUG("nfs: copy file %s [%" PRId64 ", %" PRId64 ")",
-              file_path.c_str(),
-              request.offset,
-              request.offset + request.size);
+    LOG_DEBUG_F(
+        "nfs: copy file {} [{}, {}]", file_path, request.offset, 
request.offset + request.size);
 
     if (dfile == nullptr) {
         LOG_ERROR("{nfs_service} open file %s failed", file_path.c_str());
@@ -173,8 +169,6 @@ void nfs_service_impl::on_get_file_size(
     const ::dsn::service::get_file_size_request &request,
     ::dsn::rpc_replier<::dsn::service::get_file_size_response> &reply)
 {
-    // LOG_DEBUG(">>> on call RPC_NFS_GET_FILE_SIZE end, exec 
RPC_NFS_GET_FILE_SIZE");
-
     get_file_size_response resp;
     error_code err = ERR_OK;
     std::vector<std::string> file_list;
@@ -246,7 +240,7 @@ void nfs_service_impl::close_file() // release out-of-date file handle
         // not used and expired
         if (fptr->file_access_count == 0 &&
             dsn_now_ms() - fptr->last_access_time > (uint64_t)FLAGS_file_close_expire_time_ms) {
-            LOG_DEBUG("nfs: close file handle %s", it->first.c_str());
+            LOG_DEBUG_F("nfs: close file handle {}", it->first);
             it = _handles_map.erase(it);
         } else
             it++;
diff --git a/src/perf_counter/perf_counters.cpp b/src/perf_counter/perf_counters.cpp
index c85bd8785..6ce274118 100644
--- a/src/perf_counter/perf_counters.cpp
+++ b/src/perf_counter/perf_counters.cpp
@@ -218,7 +218,7 @@ perf_counter_ptr perf_counters::get_global_counter(const char *app,
     }
 }
 
-bool perf_counters::remove_counter(const char *full_name)
+bool perf_counters::remove_counter(const std::string &full_name)
 {
     int remain_ref;
     {
@@ -235,7 +235,8 @@ bool perf_counters::remove_counter(const char *full_name)
         }
     }
 
-    LOG_DEBUG("performance counter %s is removed, remaining reference (%d)", 
full_name, remain_ref);
+    LOG_DEBUG_F(
+        "performance counter {} is removed, remaining reference ({})", 
full_name, remain_ref);
     return true;
 }
 
diff --git a/src/perf_counter/perf_counters.h b/src/perf_counter/perf_counters.h
index 763963ad7..a72470943 100644
--- a/src/perf_counter/perf_counters.h
+++ b/src/perf_counter/perf_counters.h
@@ -71,7 +71,7 @@ public:
     ///
     /// please call remove_counter if a previous get_app_counter/get_global_counter is called
     ///
-    bool remove_counter(const char *full_name);
+    bool remove_counter(const std::string &full_name);
 
     perf_counter_ptr get_counter(const std::string &full_name);
 
diff --git a/src/redis_protocol/proxy_lib/proxy_layer.h b/src/redis_protocol/proxy_lib/proxy_layer.h
index 87f4dc22f..d970d09a7 100644
--- a/src/redis_protocol/proxy_lib/proxy_layer.h
+++ b/src/redis_protocol/proxy_lib/proxy_layer.h
@@ -70,6 +70,8 @@ protected:
     virtual bool parse(dsn::message_ex *msg) = 0;
     dsn::message_ex *create_response();
 
+    const char *log_prefix() const { return _remote_address.to_string(); }
+
 protected:
     proxy_stub *_stub;
     std::atomic_bool _is_session_reset;
diff --git a/src/redis_protocol/proxy_lib/redis_parser.cpp b/src/redis_protocol/proxy_lib/redis_parser.cpp
index 08b76ee5e..e85fedb00 100644
--- a/src/redis_protocol/proxy_lib/redis_parser.cpp
+++ b/src/redis_protocol/proxy_lib/redis_parser.cpp
@@ -258,8 +258,7 @@ void redis_parser::append_message(dsn::message_ex *msg)
     msg->add_ref();
     _recv_buffers.push(msg);
     _total_length += msg->body_size();
-    LOG_DEBUG_F(
-        "{}: recv message, currently total length: {}", 
_remote_address.to_string(), _total_length);
+    LOG_DEBUG_PREFIX("recv message, currently total length: {}", 
_total_length);
 }
 
 // refererence: http://redis.io/topics/protocol
@@ -436,8 +435,7 @@ void redis_parser::set_internal(redis_parser::message_entry &entry)
 
         // with a reference to prevent the object from being destroyed
         std::shared_ptr<proxy_session> ref_this = shared_from_this();
-        LOG_DEBUG(
-            "%s: send set command(%" PRId64 ")", _remote_address.to_string(), 
entry.sequence_id);
+        LOG_DEBUG_PREFIX("send set command({})", entry.sequence_id);
         auto on_set_reply = [ref_this, this, &entry](
             ::dsn::error_code ec, dsn::message_ex *, dsn::message_ex 
*response) {
             // when the "is_session_reset" flag is set, the socket may be 
broken.
@@ -454,9 +452,7 @@ void redis_parser::set_internal(redis_parser::message_entry &entry)
             //
             // currently we only clear an entry when it is replied or
             // in the redis_parser's destructor
-            LOG_DEBUG("%s: set command seqid(%" PRId64 ") got reply",
-                      _remote_address.to_string(),
-                      entry.sequence_id);
+            LOG_DEBUG_PREFIX("set command seqid({}) got reply", 
entry.sequence_id);
             if (::dsn::ERR_OK != ec) {
                 LOG_INFO("%s: set command seqid(%" PRId64 ") got reply with 
error = %s",
                          _remote_address.to_string(),
@@ -539,9 +535,7 @@ void redis_parser::setex(message_entry &entry)
                  entry.sequence_id);
         simple_error_reply(entry, "wrong number of arguments for 'setex' 
command");
     } else {
-        LOG_DEBUG("%s: send setex command seqid(%" PRId64 ")",
-                  _remote_address.to_string(),
-                  entry.sequence_id);
+        LOG_DEBUG_PREFIX("send setex command seqid({})", entry.sequence_id);
         ::dsn::blob &ttl_blob = redis_req.sub_requests[2].data;
         int ttl_seconds;
         if (!dsn::buf2int32(ttl_blob, ttl_seconds)) {
@@ -605,9 +599,7 @@ void redis_parser::get(message_entry &entry)
                  entry.sequence_id);
         simple_error_reply(entry, "wrong number of arguments for 'get' 
command");
     } else {
-        LOG_DEBUG("%s: send get command seqid(%" PRId64 ")",
-                  _remote_address.to_string(),
-                  entry.sequence_id);
+        LOG_DEBUG_PREFIX("send get command seqid({})", entry.sequence_id);
         std::shared_ptr<proxy_session> ref_this = shared_from_this();
         auto on_get_reply = [ref_this, this, &entry](
             ::dsn::error_code ec, dsn::message_ex *, dsn::message_ex *response) {
@@ -666,9 +658,7 @@ void redis_parser::del_internal(message_entry &entry)
                  entry.sequence_id);
         simple_error_reply(entry, "wrong number of arguments for 'del' 
command");
     } else {
-        LOG_DEBUG("%s: send del command seqid(%" PRId64 ")",
-                  _remote_address.to_string(),
-                  entry.sequence_id);
+        LOG_DEBUG_PREFIX("send del command seqid({})", entry.sequence_id);
         std::shared_ptr<proxy_session> ref_this = shared_from_this();
         auto on_del_reply = [ref_this, this, &entry](
             ::dsn::error_code ec, dsn::message_ex *, dsn::message_ex *response) {
@@ -679,9 +669,7 @@ void redis_parser::del_internal(message_entry &entry)
                 return;
             }
 
-            LOG_DEBUG("%s: del command seqid(%" PRId64 ") got reply",
-                      _remote_address.to_string(),
-                      entry.sequence_id);
+            LOG_DEBUG_PREFIX("del command seqid({}) got reply", 
entry.sequence_id);
             if (::dsn::ERR_OK != ec) {
                 LOG_INFO("%s: del command seqid(%" PRId64 ") got reply with 
error = %s",
                          _remote_address.to_string(),
@@ -761,9 +749,7 @@ void redis_parser::ttl(message_entry &entry)
         simple_error_reply(
             entry, fmt::format("wrong number of arguments for '{}'", is_ttl ? 
"ttl" : "pttl"));
     } else {
-        LOG_DEBUG("%s: send pttl/ttl command seqid(%" PRId64 ")",
-                  _remote_address.to_string(),
-                  entry.sequence_id);
+        LOG_DEBUG_PREFIX("send pttl/ttl command seqid({})", entry.sequence_id);
         std::shared_ptr<proxy_session> ref_this = shared_from_this();
         auto on_ttl_reply = [ref_this, this, &entry, is_ttl](
             ::dsn::error_code ec, dsn::message_ex *, dsn::message_ex *response) {
@@ -774,9 +760,7 @@ void redis_parser::ttl(message_entry &entry)
                 return;
             }
 
-            LOG_DEBUG("%s: ttl/pttl command seqid(%" PRId64 ") got reply",
-                      _remote_address.to_string(),
-                      entry.sequence_id);
+            LOG_DEBUG_PREFIX("ttl/pttl command seqid({}) got reply", 
entry.sequence_id);
             if (::dsn::ERR_OK != ec) {
                 LOG_INFO("%s: del command seqid(%" PRId64 ") got reply with 
error = %s",
                          _remote_address.to_string(),
@@ -1324,9 +1308,7 @@ void redis_parser::handle_command(std::unique_ptr<message_entry> &&entry)
     e.sequence_id = ++s_next_seqid;
     e.response.store(nullptr, std::memory_order_relaxed);
 
-    LOG_DEBUG("%s: new command parsed with new seqid %" PRId64 "",
-              _remote_address.to_string(),
-              e.sequence_id);
+    LOG_DEBUG_PREFIX("new command parsed with new seqid {}", e.sequence_id);
     enqueue_pending_response(std::move(entry));
 
     CHECK_GT_MSG(request.sub_request_count, 0, "invalid request");
diff --git a/src/replica/mutation.cpp b/src/replica/mutation.cpp
index c60ddcc79..504baaad6 100644
--- a/src/replica/mutation.cpp
+++ b/src/replica/mutation.cpp
@@ -363,10 +363,9 @@ mutation_ptr mutation_queue::add_work(task_code code, dsn::message_ex *request,
         _pending_mutation = r->new_mutation(invalid_decree);
     }
 
-    LOG_DEBUG("add request with trace_id = %016" PRIx64
-              " into mutation with mutation_tid = %" PRIu64,
-              request->header->trace_id,
-              _pending_mutation->tid());
+    LOG_DEBUG_F("add request with trace_id = {:#018x} into mutation with 
mutation_tid = {}",
+                request->header->trace_id,
+                _pending_mutation->tid());
 
     _pending_mutation->add_client_request(code, request);
 
diff --git a/src/replica/mutation_log.cpp b/src/replica/mutation_log.cpp
index 7530648c9..31850e477 100644
--- a/src/replica/mutation_log.cpp
+++ b/src/replica/mutation_log.cpp
@@ -701,7 +701,7 @@ void mutation_log::close()
         _is_opened = false;
     }
 
-    LOG_DEBUG("close mutation log %s", dir().c_str());
+    LOG_DEBUG_F("close mutation log {}", dir());
 
     // make all data is on disk
     flush();
@@ -1481,26 +1481,24 @@ int mutation_log::garbage_collection(const replica_log_info_map &gc_condition,
                     CHECK(valid_start_offset == 0 || valid_start_offset >= log->end_offset(),
                           "valid start offset must be 0 or greater than the end of this log file");
 
-                    LOG_DEBUG("gc @ %d.%d: max_decree for %s is missing vs %" 
PRId64
-                              " as garbage max decree,"
-                              " safe to delete this and all older logs for 
this replica",
-                              gpid.get_app_id(),
-                              gpid.get_partition_index(),
-                              log->path().c_str(),
-                              garbage_max_decree);
+                    LOG_DEBUG_F(
+                        "gc @ {}: max_decree for {} is missing vs {} as 
garbage max decree, it's "
+                        "safe to delete this and all older logs for this 
replica",
+                        gpid,
+                        log->path(),
+                        garbage_max_decree);
                     delete_ok_for_this_replica = true;
                     kickout_this_replica = true;
                 }
 
                 // log is invalid for this replica, ok to delete
                 else if (log->end_offset() <= valid_start_offset) {
-                    LOG_DEBUG(
-                        "gc @ %d.%d: log is invalid for %s, as"
-                        " valid start offset vs log end offset = %" PRId64 " 
vs %" PRId64 ","
-                        " it is therefore safe to delete this and all older 
logs for this replica",
-                        gpid.get_app_id(),
-                        gpid.get_partition_index(),
-                        log->path().c_str(),
+                    LOG_DEBUG_F(
+                        "gc @ {}: log is invalid for {}, as valid start offset 
vs log end offset = "
+                        "{} vs {}, it is therefore safe to delete this and all 
older logs for this "
+                        "replica",
+                        gpid,
+                        log->path(),
                         valid_start_offset,
                         log->end_offset());
                     delete_ok_for_this_replica = true;
@@ -1509,13 +1507,11 @@ int mutation_log::garbage_collection(const replica_log_info_map &gc_condition,
 
                 // all decrees are no more than garbage max decree, ok to delete
                 else if (it3->second.max_decree <= garbage_max_decree) {
-                    LOG_DEBUG(
-                        "gc @ %d.%d: max_decree for %s is %" PRId64 " vs %" 
PRId64
-                        " as garbage max decree,"
-                        " it is therefore safe to delete this and all older 
logs for this replica",
-                        gpid.get_app_id(),
-                        gpid.get_partition_index(),
-                        log->path().c_str(),
+                    LOG_DEBUG_F(
+                        "gc @ {}: max_decree for {} is {} vs {} as garbage max 
decree, it is "
+                        "therefore safe to delete this and all older logs for 
this replica",
+                        gpid,
+                        log->path(),
                         it3->second.max_decree,
                         garbage_max_decree);
                     delete_ok_for_this_replica = true;
@@ -1525,14 +1521,12 @@ int mutation_log::garbage_collection(const replica_log_info_map &gc_condition,
                 else // it3->second.max_decree > garbage_max_decree
                 {
                     // should not delete this file
-                    LOG_DEBUG("gc @ %d.%d: max_decree for %s is %" PRId64 " vs 
%" PRId64
-                              " as garbage max decree,"
-                              " it is therefore not allowed to delete this and 
all older logs",
-                              gpid.get_app_id(),
-                              gpid.get_partition_index(),
-                              log->path().c_str(),
-                              it3->second.max_decree,
-                              garbage_max_decree);
+                    LOG_DEBUG_F("gc @ {}: max_decree for {} is {} vs {} as 
garbage max decree, it "
+                                "is therefore not allowed to delete this and 
all older logs",
+                                gpid,
+                                log->path(),
+                                it3->second.max_decree,
+                                garbage_max_decree);
                     prevent_gc_replicas_for_this_log.insert(gpid);
                     decree gap = it3->second.max_decree - garbage_max_decree;
                     if (log->index() < stop_gc_log_index || gap > stop_gc_decree_gap) {
diff --git a/src/replica/mutation_log_replay.cpp b/src/replica/mutation_log_replay.cpp
index 1131b29ab..dbad94f10 100644
--- a/src/replica/mutation_log_replay.cpp
+++ b/src/replica/mutation_log_replay.cpp
@@ -124,7 +124,7 @@ namespace replication {
         if (log == nullptr) {
             if (err == ERR_HANDLE_EOF || err == ERR_INCOMPLETE_DATA ||
                 err == ERR_INVALID_PARAMETERS) {
-                LOG_DEBUG("skip file %s during log replay", fpath.c_str());
+                LOG_DEBUG_F("skip file {} during log replay", fpath);
                 continue;
             } else {
                 return err;
diff --git a/src/replica/replica.cpp b/src/replica/replica.cpp
index 91c1a6f7c..5d0f546d7 100644
--- a/src/replica/replica.cpp
+++ b/src/replica/replica.cpp
@@ -197,7 +197,7 @@ replica::~replica(void)
 {
     close();
     _prepare_list = nullptr;
-    LOG_DEBUG("%s: replica destroyed", name());
+    LOG_DEBUG_PREFIX("replica destroyed");
 }
 
 void replica::on_client_read(dsn::message_ex *request, bool ignore_throttling)
@@ -285,10 +285,8 @@ void replica::check_state_completeness()
 
 void replica::execute_mutation(mutation_ptr &mu)
 {
-    LOG_DEBUG("%s: execute mutation %s: request_count = %u",
-              name(),
-              mu->name(),
-              static_cast<int>(mu->client_requests.size()));
+    LOG_DEBUG_PREFIX(
+        "execute mutation {}: request_count = {}", mu->name(), 
mu->client_requests.size());
 
     error_code err = ERR_OK;
     decree d = mu->data.header.decree;
@@ -298,11 +296,10 @@ void replica::execute_mutation(mutation_ptr &mu)
         if (_app->last_committed_decree() + 1 == d) {
             err = _app->apply_mutation(mu);
         } else {
-            LOG_DEBUG("%s: mutation %s commit to %s skipped, 
app.last_committed_decree = %" PRId64,
-                      name(),
-                      mu->name(),
-                      enum_to_string(status()),
-                      _app->last_committed_decree());
+            LOG_DEBUG_PREFIX("mutation {} commit to {} skipped, 
app.last_committed_decree = {}",
+                             mu->name(),
+                             enum_to_string(status()),
+                             _app->last_committed_decree());
         }
         break;
     case partition_status::PS_PRIMARY: {
@@ -318,11 +315,10 @@ void replica::execute_mutation(mutation_ptr &mu)
             CHECK_EQ(_app->last_committed_decree() + 1, d);
             err = _app->apply_mutation(mu);
         } else {
-            LOG_DEBUG("%s: mutation %s commit to %s skipped, 
app.last_committed_decree = %" PRId64,
-                      name(),
-                      mu->name(),
-                      enum_to_string(status()),
-                      _app->last_committed_decree());
+            LOG_DEBUG_PREFIX("mutation {} commit to {} skipped, 
app.last_committed_decree = {}",
+                             mu->name(),
+                             enum_to_string(status()),
+                             _app->last_committed_decree());
 
             // make sure private log saves the state
             // catch-up will be done later after checkpoint task is fininished
@@ -336,11 +332,10 @@ void replica::execute_mutation(mutation_ptr &mu)
             CHECK_EQ(_app->last_committed_decree() + 1, d);
             err = _app->apply_mutation(mu);
         } else {
-            LOG_DEBUG("%s: mutation %s commit to %s skipped, app.last_committed_decree = %" PRId64,
-                      name(),
-                      mu->name(),
-                      enum_to_string(status()),
-                      _app->last_committed_decree());
+            LOG_DEBUG_PREFIX("mutation {} commit to {} skipped, app.last_committed_decree = {}",
+                             mu->name(),
+                             enum_to_string(status()),
+                             _app->last_committed_decree());
 
             // prepare also happens with learner_status::LearningWithPrepare, in this case
             // make sure private log saves the state,
@@ -360,8 +355,7 @@ void replica::execute_mutation(mutation_ptr &mu)
         CHECK(false, "invalid partition_status, status = {}", enum_to_string(status()));
     }
 
-    LOG_DEBUG(
-        "TwoPhaseCommit, %s: mutation %s committed, err = %s", name(), mu->name(), err.to_string());
+    LOG_DEBUG_PREFIX("TwoPhaseCommit, mutation {} committed, err = {}", mu->name(), err);
 
     if (err != ERR_OK) {
         handle_local_failure(err);
diff --git a/src/replica/replica_2pc.cpp b/src/replica/replica_2pc.cpp
index 8d912f634..1194859b2 100644
--- a/src/replica/replica_2pc.cpp
+++ b/src/replica/replica_2pc.cpp
@@ -154,7 +154,7 @@ void replica::on_client_write(dsn::message_ex *request, bool ignore_throttling)
         return;
     }
 
-    LOG_DEBUG("%s: got write request from %s", name(), request->header->from_address.to_string());
+    LOG_DEBUG_PREFIX("got write request from {}", request->header->from_address);
     auto mu = _primary_states.write_queue.add_work(request->rpc_code(), request, this);
     if (mu) {
         init_prepare(mu, false);
@@ -346,11 +346,10 @@ void replica::send_prepare_message(::dsn::rpc_address addr,
                   },
                   get_gpid().thread_hash());
 
-    LOG_DEBUG("%s: mutation %s send_prepare_message to %s as %s",
-              name(),
-              mu->name(),
-              addr.to_string(),
-              enum_to_string(rconfig.status));
+    LOG_DEBUG_PREFIX("mutation {} send_prepare_message to {} as {}",
+                     mu->name(),
+                     addr,
+                     enum_to_string(rconfig.status));
 }
 
 void replica::do_possible_commit_on_primary(mutation_ptr &mu)
@@ -383,7 +382,7 @@ void replica::on_prepare(dsn::message_ex *request)
 
     decree decree = mu->data.header.decree;
 
-    LOG_DEBUG("%s: mutation %s on_prepare", name(), mu->name());
+    LOG_DEBUG_PREFIX("mutation {} on_prepare", mu->name());
     mu->_tracer->set_name(fmt::format("mutation[{}]", mu->name()));
     mu->_tracer->set_description("secondary");
     ADD_POINT(mu->_tracer);
@@ -517,11 +516,8 @@ void replica::on_append_log_completed(mutation_ptr &mu, error_code err, size_t s
 {
     _checker.only_one_thread_access();
 
-    LOG_DEBUG("%s: append shared log completed for mutation %s, size = %u, err = %s",
-              name(),
-              mu->name(),
-              size,
-              err.to_string());
+    LOG_DEBUG_PREFIX(
+        "append shared log completed for mutation {}, size = {}, err = {}", mu->name(), size, err);
 
     ADD_POINT(mu->_tracer);
 
@@ -610,14 +606,13 @@ void replica::on_prepare_reply(std::pair<mutation_ptr, partition_status::type> p
     ADD_CUSTOM_POINT(send_prepare_tracer, resp.err.to_string());
 
     if (resp.err == ERR_OK) {
-        LOG_DEBUG("%s: mutation %s on_prepare_reply from %s, appro_data_bytes = %d, "
-                  "target_status = %s, err = %s",
-                  name(),
-                  mu->name(),
-                  node.to_string(),
-                  mu->appro_data_bytes(),
-                  enum_to_string(target_status),
-                  resp.err.to_string());
+        LOG_DEBUG_PREFIX("mutation {} on_prepare_reply from {}, appro_data_bytes = {}, "
+                         "target_status = {}, err = {}",
+                         mu->name(),
+                         node,
+                         mu->appro_data_bytes(),
+                         enum_to_string(target_status),
+                         resp.err);
     } else {
         LOG_ERROR("%s: mutation %s on_prepare_reply from %s, appro_data_bytes = %d, "
                   "target_status = %s, err = %s",
diff --git a/src/replica/replica_config.cpp b/src/replica/replica_config.cpp
index 9f481b7a0..48bc034d9 100644
--- a/src/replica/replica_config.cpp
+++ b/src/replica/replica_config.cpp
@@ -81,7 +81,7 @@ void replica::on_config_proposal(configuration_update_request &proposal)
     }
 
     if (_primary_states.reconfiguration_task != nullptr) {
-        LOG_DEBUG("%s: reconfiguration on the way, skip the incoming proposal", name());
+        LOG_DEBUG_PREFIX("reconfiguration on the way, skip the incoming proposal");
         return;
     }
 
@@ -1124,9 +1124,8 @@ void replica::replay_prepare_list()
         mutation_ptr mu = new_mutation(decree);
 
         if (old != nullptr) {
-            LOG_DEBUG("copy mutation from mutation_tid=%" PRIu64 " to mutation_tid=%" PRIu64,
-                      old->tid(),
-                      mu->tid());
+            LOG_DEBUG_PREFIX(
+                "copy mutation from mutation_tid={} to mutation_tid={}", old->tid(), mu->tid());
             mu->copy_from(old);
         } else {
             mu->add_client_request(RPC_REPLICATION_WRITE_EMPTY, nullptr);
diff --git a/src/replica/replica_init.cpp b/src/replica/replica_init.cpp
index ceb25e547..d54fe6fff 100644
--- a/src/replica/replica_init.cpp
+++ b/src/replica/replica_init.cpp
@@ -384,22 +384,19 @@ bool replica::replay_mutation(mutation_ptr &mu, bool is_private)
     }
 
     if (is_private && offset < _app->init_info().init_offset_in_private_log) {
-        LOG_DEBUG(
-            "%s: replay mutation skipped1 as offset is invalid in private log, ballot = %" PRId64
-            ", decree = %" PRId64 ", last_committed_decree = %" PRId64 ", offset = %" PRId64,
-            name(),
-            mu->data.header.ballot,
-            d,
-            mu->data.header.last_committed_decree,
-            offset);
+        LOG_DEBUG_PREFIX("replay mutation skipped1 as offset is invalid in private log, ballot = {}"
+                         ", decree = {}, last_committed_decree = {}, offset = {}",
+                         mu->data.header.ballot,
+                         d,
+                         mu->data.header.last_committed_decree,
+                         offset);
         return false;
     }
 
     if (!is_private && offset < _app->init_info().init_offset_in_shared_log) {
-        LOG_DEBUG(
-            "%s: replay mutation skipped2 as offset is invalid in shared log, ballot = %" PRId64
-            ", decree = %" PRId64 ", last_committed_decree = %" PRId64 ", offset = %" PRId64,
-            name(),
+        LOG_DEBUG_PREFIX(
+            "replay mutation skipped2 as offset is invalid in shared log, ballot = {}, "
+            "decree = {}, last_committed_decree = {}, offset = {}",
             mu->data.header.ballot,
             d,
             mu->data.header.last_committed_decree,
@@ -413,39 +410,34 @@ bool replica::replay_mutation(mutation_ptr &mu, bool is_private)
     }
 
     if (d <= last_committed_decree()) {
-        LOG_DEBUG("%s: replay mutation skipped3 as decree is outdated, ballot = %" PRId64
-                  ", decree = %" PRId64 "(vs app %" PRId64 "), last_committed_decree = %" PRId64
-                  ", offset = %" PRId64,
-                  name(),
-                  mu->data.header.ballot,
-                  d,
-                  last_committed_decree(),
-                  mu->data.header.last_committed_decree,
-                  offset);
+        LOG_DEBUG_PREFIX("replay mutation skipped3 as decree is outdated, ballot = {}, "
+                         "decree = {}(vs app {}), last_committed_decree = {}, offset = {}",
+                         mu->data.header.ballot,
+                         d,
+                         last_committed_decree(),
+                         mu->data.header.last_committed_decree,
+                         offset);
         return true;
     }
 
     auto old = _prepare_list->get_mutation_by_decree(d);
     if (old != nullptr && old->data.header.ballot >= mu->data.header.ballot) {
-        LOG_DEBUG("%s: replay mutation skipped4 as ballot is outdated, ballot = %" PRId64
-                  " (vs local-ballot=%" PRId64 "), decree = %" PRId64
-                  ", last_committed_decree = %" PRId64 ", offset = %" PRId64,
-                  name(),
-                  mu->data.header.ballot,
-                  old->data.header.ballot,
-                  d,
-                  mu->data.header.last_committed_decree,
-                  offset);
+        LOG_DEBUG_PREFIX(
+            "replay mutation skipped4 as ballot is outdated, ballot = {} (vs local-ballot={}), "
+            "decree = {}, last_committed_decree = {}, offset = {}",
+            mu->data.header.ballot,
+            old->data.header.ballot,
+            d,
+            mu->data.header.last_committed_decree,
+            offset);
 
         return true;
     }
 
-    LOG_DEBUG("%s: replay mutation ballot = %" PRId64 ", decree = %" PRId64
-              ", last_committed_decree = %" PRId64,
-              name(),
-              mu->data.header.ballot,
-              d,
-              mu->data.header.last_committed_decree);
+    LOG_DEBUG_PREFIX("replay mutation ballot = {}, decree = {}, last_committed_decree = {}",
+                     mu->data.header.ballot,
+                     d,
+                     mu->data.header.last_committed_decree);
 
     // prepare
     _uniq_timestamp_us.try_update(mu->data.header.timestamp);
diff --git a/src/replica/replica_learn.cpp b/src/replica/replica_learn.cpp
index d34e63173..65e9143d1 100644
--- a/src/replica/replica_learn.cpp
+++ b/src/replica/replica_learn.cpp
@@ -785,10 +785,9 @@ void replica::on_learn_reply(error_code err, learn_request &&req, learn_response
         while (!reader.is_eof()) {
             auto mu = mutation::read_from(reader, nullptr);
             if (mu->data.header.decree > last_committed_decree()) {
-                LOG_DEBUG("%s: on_learn_reply[%016" PRIx64 "]: apply learned mutation %s",
-                          name(),
-                          req.signature,
-                          mu->name());
+                LOG_DEBUG_PREFIX("on_learn_reply[{:#018x}]: apply learned mutation {}",
+                                 req.signature,
+                                 mu->name());
 
                 // write to private log with no callback, the later 2pc ensures that logs
                 // are written to the disk
diff --git a/src/replica/replica_restore.cpp b/src/replica/replica_restore.cpp
index b90bbb228..a40a10733 100644
--- a/src/replica/replica_restore.cpp
+++ b/src/replica/replica_restore.cpp
@@ -454,12 +454,12 @@ void replica::report_restore_status_to_meta()
     rpc::call(target,
               msg,
               &_tracker,
-              [](error_code err, dsn::message_ex *request, dsn::message_ex *resp) {
+              [this](error_code err, dsn::message_ex *request, dsn::message_ex *resp) {
                   if (err == ERR_OK) {
                       configuration_report_restore_status_response response;
                       ::dsn::unmarshall(resp, response);
                       if (response.err == ERR_OK) {
-                          LOG_DEBUG("report restore status succeed");
+                          LOG_DEBUG_PREFIX("report restore status succeed");
                           return;
                       }
                   } else if (err == ERR_TIMEOUT) {
diff --git a/src/replica/storage/simple_kv/simple_kv.server.impl.cpp b/src/replica/storage/simple_kv/simple_kv.server.impl.cpp
index abb41a07e..2be4e93c1 100644
--- a/src/replica/storage/simple_kv/simple_kv.server.impl.cpp
+++ b/src/replica/storage/simple_kv/simple_kv.server.impl.cpp
@@ -67,7 +67,7 @@ void simple_kv_service_impl::on_read(const std::string &key, ::dsn::rpc_replier<
         }
     }
 
-    LOG_DEBUG("read %s", r.c_str());
+    LOG_DEBUG_F("read {}", r);
     reply(r);
 }
 
@@ -79,7 +79,7 @@ void simple_kv_service_impl::on_write(const kv_pair &pr, ::dsn::rpc_replier<int3
         _store[pr.key] = pr.value;
     }
 
-    LOG_DEBUG("write %s", pr.key.c_str());
+    LOG_DEBUG_F("write {}", pr.key);
     reply(0);
 }
 
@@ -95,7 +95,7 @@ void simple_kv_service_impl::on_append(const kv_pair &pr, ::dsn::rpc_replier<int
             _store[pr.key] = pr.value;
     }
 
-    LOG_DEBUG("append %s", pr.key.c_str());
+    LOG_DEBUG_F("append {}", pr.key);
     reply(0);
 }
 
diff --git a/src/reporter/pegasus_counter_reporter.cpp b/src/reporter/pegasus_counter_reporter.cpp
index a2f2d8315..9f5cff423 100644
--- a/src/reporter/pegasus_counter_reporter.cpp
+++ b/src/reporter/pegasus_counter_reporter.cpp
@@ -303,7 +303,7 @@ void pegasus_counter_reporter::http_post_request(const std::string &host,
                                                  const std::string &contentType,
                                                  const std::string &data)
 {
-    LOG_DEBUG("start update_request, %s", data.c_str());
+    LOG_DEBUG_F("start update_request: {}", data);
     struct event_base *base = event_base_new();
     struct evhttp_connection *conn = evhttp_connection_base_new(base, nullptr, host.c_str(), port);
     struct evhttp_request *req =
@@ -330,7 +330,7 @@ void pegasus_counter_reporter::http_request_done(struct evhttp_request *req, voi
     } else if (req->response_code == 0) {
         LOG_ERROR("http post request failed: connection refused");
     } else if (req->response_code == HTTP_OK) {
-        LOG_DEBUG("http post request succeed");
+        LOG_DEBUG_F("http post request succeed");
     } else {
         struct evbuffer *buf = evhttp_request_get_input_buffer(req);
         size_t len = evbuffer_get_length(buf);
diff --git a/src/runtime/rpc/asio_rpc_session.cpp b/src/runtime/rpc/asio_rpc_session.cpp
index 356bf3594..9018e2f6b 100644
--- a/src/runtime/rpc/asio_rpc_session.cpp
+++ b/src/runtime/rpc/asio_rpc_session.cpp
@@ -45,7 +45,8 @@ void asio_rpc_session::set_options()
         _socket->get_option(option, ec);
         if (ec)
             LOG_WARNING("asio socket get option failed, error = %s", ec.message().c_str());
-        LOG_DEBUG("boost asio send buffer size is %u, set as 16MB, now is %u", old, option.value());
+        LOG_DEBUG_F(
+            "boost asio send buffer size is {}, set as 16MB, now is {}", old, option.value());
 
         boost::asio::socket_base::receive_buffer_size option3, option4(16 * 1024 * 1024);
         _socket->get_option(option3, ec);
@@ -58,7 +59,8 @@ void asio_rpc_session::set_options()
         _socket->get_option(option3, ec);
         if (ec)
             LOG_WARNING("asio socket get option failed, error = %s", ec.message().c_str());
-        LOG_DEBUG("boost asio recv buffer size is %u, set as 16MB, now is %u", old, option.value());
+        LOG_DEBUG_F(
+            "boost asio recv buffer size is {}, set as 16MB, now is {}", old, option.value());
 
         // Nagle algorithm may cause an extra delay in some cases, because if
         // the data in a single write spans 2n packets, the last packet will be
@@ -71,7 +73,7 @@ void asio_rpc_session::set_options()
         _socket->set_option(boost::asio::ip::tcp::no_delay(true), ec);
         if (ec)
             LOG_WARNING("asio socket set option failed, error = %s", ec.message().c_str());
-        LOG_DEBUG("boost asio set no_delay = true");
+        LOG_DEBUG_F("boost asio set no_delay = true");
     }
 }
 
@@ -184,7 +186,7 @@ void asio_rpc_session::connect()
         add_ref();
         _socket->async_connect(ep, [this](boost::system::error_code ec) {
             if (!ec) {
-                LOG_DEBUG("client session %s connected", _remote_addr.to_string());
+                LOG_DEBUG_F("client session {} connected", _remote_addr);
 
                 set_options();
                 set_connected();
diff --git a/src/runtime/rpc/network.cpp b/src/runtime/rpc/network.cpp
index 26cf87feb..41189fd20 100644
--- a/src/runtime/rpc/network.cpp
+++ b/src/runtime/rpc/network.cpp
@@ -240,9 +240,8 @@ int rpc_session::prepare_parser()
         }
     }
     _parser = _net.new_message_parser(hdr_format);
-    LOG_DEBUG("message parser created, remote_client = %s, header_format = %s",
-              _remote_addr.to_string(),
-              hdr_format.to_string());
+    LOG_DEBUG_F(
+        "message parser created, remote_client = {}, header_format = {}", _remote_addr, hdr_format);
 
     return 0;
 }
@@ -754,9 +753,9 @@ void connection_oriented_network::on_server_session_disconnected(rpc_session_ptr
 bool connection_oriented_network::check_if_conn_threshold_exceeded(::dsn::rpc_address ep)
 {
     if (_cfg_conn_threshold_per_ip <= 0) {
-        LOG_DEBUG("new client from %s is connecting to server %s, no connection threshold",
-                  ep.ipv4_str(),
-                  address().to_string());
+        LOG_DEBUG_F("new client from {} is connecting to server {}, no connection threshold",
+                    ep.ipv4_str(),
+                    address());
         return false;
     }
 
@@ -773,12 +772,12 @@ bool connection_oriented_network::check_if_conn_threshold_exceeded(::dsn::rpc_ad
         exceeded = true;
     }
 
-    LOG_DEBUG("new client from %s is connecting to server %s, existing connection count "
-              "= %d, threshold = %u",
-              ep.ipv4_str(),
-              address().to_string(),
-              ip_conn_count,
-              _cfg_conn_threshold_per_ip);
+    LOG_DEBUG_F("new client from {} is connecting to server {}, existing connection count = {}, "
+                "threshold = {}",
+                ep.ipv4_str(),
+                address(),
+                ip_conn_count,
+                _cfg_conn_threshold_per_ip);
 
     return exceeded;
 }
diff --git a/src/runtime/rpc/rpc_engine.cpp b/src/runtime/rpc/rpc_engine.cpp
index 1fe71a577..17ebb24ca 100644
--- a/src/runtime/rpc/rpc_engine.cpp
+++ b/src/runtime/rpc/rpc_engine.cpp
@@ -270,9 +270,9 @@ void rpc_client_matcher::on_rpc_timeout(uint64_t key)
 
     if (resend) {
         auto req = call->get_request();
-        LOG_DEBUG("resend request message for rpc trace_id = %016" PRIx64 ", key = %" PRIu64,
-                  req->header->trace_id,
-                  key);
+        LOG_DEBUG_F("resend request message for rpc trace_id = {:#018x}, key = {}",
+                    req->header->trace_id,
+                    key);
 
         // resend without handling rpc_matcher, use the same request_id
         _engine->call_ip(req->to_address, req, nullptr);
@@ -725,9 +725,9 @@ void rpc_engine::reply(message_ex *response, error_code err)
     // for example, the profiler may be mistakenly calculated
     auto s = response->io_session.get();
     if (s == nullptr && response->to_address.is_invalid()) {
-        LOG_DEBUG("rpc reply %s is dropped (invalid to-address), trace_id = %016" PRIx64,
-                  response->header->rpc_name,
-                  response->header->trace_id);
+        LOG_DEBUG_F("rpc reply {} is dropped (invalid to-address), trace_id = {:#018x}",
+                    response->header->rpc_name,
+                    response->header->trace_id);
         response->add_ref();
         response->release_ref();
         return;
diff --git a/src/runtime/task/task.h b/src/runtime/task/task.h
index 4cea16417..81284f5fe 100644
--- a/src/runtime/task/task.h
+++ b/src/runtime/task/task.h
@@ -431,10 +431,11 @@ public:
                 _handler(_request);
             }
         } else {
-            LOG_DEBUG("rpc_request_task(%s) from(%s) stop to execute due to timeout_ms(%d) exceed",
-                      spec().name.c_str(),
-                      _request->header->from_address.to_string(),
-                      _request->header->client.timeout_ms);
+            LOG_DEBUG_F(
+                "rpc_request_task({}) from({}) stop to execute due to timeout_ms({}) exceed",
+                spec().name,
+                _request->header->from_address,
+                _request->header->client.timeout_ms);
             spec().on_rpc_task_dropped.execute(this);
         }
     }
diff --git a/src/server/available_detector.cpp b/src/server/available_detector.cpp
index b2f65ab25..6aeb68110 100644
--- a/src/server/available_detector.cpp
+++ b/src/server/available_detector.cpp
@@ -285,12 +285,12 @@ void available_detector::on_detect(int32_t idx)
                  _recent_minute_detect_times.load(),
                  _recent_minute_fail_times.load());
     }
-    LOG_DEBUG("available_detector begin to detect partition[%d] of table[%s] with id[%d] on the "
-              "cluster[%s]",
-              idx,
-              _app_name.c_str(),
-              _app_id,
-              _cluster_name.c_str());
+    LOG_DEBUG_F("available_detector begin to detect partition[{}] of table[{}] with id[{}] on the "
+                "cluster[{}]",
+                idx,
+                _app_name,
+                _app_id,
+                _cluster_name);
     auto time = dsn_now_ms();
     std::string value = "detect_value_" + std::to_string((time / 1000));
     _recent_day_detect_times.fetch_add(1);
@@ -314,10 +314,10 @@ void available_detector::on_detect(int32_t idx)
             check_and_send_email(&cnt, idx);
         } else {
             cnt.store(0);
-            LOG_DEBUG("async_get partition[%d] ok, hash_key = %s, value = %s",
-                      idx,
-                      _hash_keys[idx].c_str(),
-                      _value.c_str());
+            LOG_DEBUG_F("async_get partition[{}] ok, hash_key = {}, value = {}",
+                        idx,
+                        _hash_keys[idx],
+                        _value);
         }
     };
 
@@ -339,7 +339,7 @@ void available_detector::on_detect(int32_t idx)
                       _client->get_error_string(err));
             check_and_send_email(&cnt, idx);
         } else {
-            LOG_DEBUG("async_set partition[%d] ok, hash_key = %s", idx, _hash_keys[idx].c_str());
+            LOG_DEBUG_F("async_set partition[{}] ok, hash_key = {}", idx, _hash_keys[idx]);
             _client->async_get(
                 _hash_keys[idx], "", std::move(user_async_get_callback), _detect_timeout);
         }
diff --git a/src/server/info_collector.cpp b/src/server/info_collector.cpp
index 36ac5606d..3c5c6946f 100644
--- a/src/server/info_collector.cpp
+++ b/src/server/info_collector.cpp
@@ -278,8 +278,8 @@ void info_collector::on_capacity_unit_stat(int remaining_retry_count)
     for (node_capacity_unit_stat &elem : nodes_stat) {
         if (elem.node_address.empty() || elem.timestamp.empty() ||
             !has_capacity_unit_updated(elem.node_address, elem.timestamp)) {
-            LOG_DEBUG("recent read/write capacity unit value of node %s has not updated",
-                      elem.node_address.c_str());
+            LOG_DEBUG_F("recent read/write capacity unit value of node {} has not updated",
+                        elem.node_address);
             continue;
         }
         _result_writer->set_result(elem.timestamp, "cu@" + elem.node_address, elem.dump_to_json());
diff --git a/src/server/result_writer.cpp b/src/server/result_writer.cpp
index 0bd2220ef..1552320c8 100644
--- a/src/server/result_writer.cpp
+++ b/src/server/result_writer.cpp
@@ -64,10 +64,10 @@ void result_writer::set_result(const std::string &hash_key,
                           new_try_count);
             }
         } else {
-            LOG_DEBUG("set_result succeed, hash_key = %s, sort_key = %s, value = %s",
-                      hash_key.c_str(),
-                      sort_key.c_str(),
-                      value.c_str());
+            LOG_DEBUG_F("set_result succeed, hash_key = {}, sort_key = {}, value = {}",
+                        hash_key,
+                        sort_key,
+                        value);
         }
     };
 
diff --git a/src/test/kill_test/data_verifier.cpp b/src/test/kill_test/data_verifier.cpp
index d92aa40d4..6a7ba2203 100644
--- a/src/test/kill_test/data_verifier.cpp
+++ b/src/test/kill_test/data_verifier.cpp
@@ -228,16 +228,16 @@ void do_get_range(int thread_id, int round_id, long long start_id, long long end
                           info.server.c_str());
                 exit(-1);
             } else {
-                LOG_DEBUG("GetThread[%d]: round(%d): get succeed: id=%lld, try=%d, time=%ld "
-                          "(gpid=%d.%d, server=%s)",
-                          thread_id,
-                          round_id,
-                          id,
-                          try_count,
-                          (cur_time - last_time),
-                          info.app_id,
-                          info.partition_index,
-                          info.server.c_str());
+                LOG_DEBUG_F("GetThread[{}]: round({}): get succeed: id={}, try={}, time={} "
+                            "(gpid={}.{}, server={})",
+                            thread_id,
+                            round_id,
+                            id,
+                            try_count,
+                            (cur_time - last_time),
+                            info.app_id,
+                            info.partition_index,
+                            info.server);
                 stat_time[stat_count++] = cur_time - last_time;
                 if (stat_count == stat_batch) {
                     std::sort(stat_time.begin(), stat_time.end());
diff --git a/src/test/kill_test/kill_testor.cpp b/src/test/kill_test/kill_testor.cpp
index 3dcf0b98c..4eb2c22df 100644
--- a/src/test/kill_test/kill_testor.cpp
+++ b/src/test/kill_test/kill_testor.cpp
@@ -121,7 +121,7 @@ dsn::error_code kill_testor::get_partition_info(bool debug_unhealthy,
     dsn::error_code err = ddl_client->list_app(app_name, app_id, partition_count, partitions);
 
     if (err == ::dsn::ERR_OK) {
-        LOG_DEBUG("access meta and query partition status success");
+        LOG_DEBUG_F("access meta and query partition status success");
         for (int i = 0; i < partitions.size(); i++) {
             const dsn::partition_configuration &p = partitions[i];
             int replica_count = 0;
@@ -147,13 +147,13 @@ dsn::error_code kill_testor::get_partition_info(bool debug_unhealthy,
                 if (debug_unhealthy) {
                     LOG_INFO("found unhealthy partition, %s", info.str().c_str());
                 } else {
-                    LOG_DEBUG("found unhealthy partition, %s", info.str().c_str());
+                    LOG_DEBUG_F("found unhealthy partition, {}", info.str());
                 }
             }
         }
         unhealthy_partition_cnt = partition_count - healthy_partition_cnt;
     } else {
-        LOG_DEBUG("access meta and query partition status fail");
+        LOG_DEBUG_F("access meta and query partition status fail");
         healthy_partition_cnt = 0;
         unhealthy_partition_cnt = 0;
     }
@@ -172,10 +172,10 @@ bool kill_testor::check_cluster_status()
                                                  unhealthy_partition_cnt);
         if (err == dsn::ERR_OK) {
             if (unhealthy_partition_cnt > 0) {
-                LOG_DEBUG("query partition status success, but still have unhealthy partition, "
-                          "healthy_partition_count = %d, unhealthy_partition_count = %d",
-                          healthy_partition_cnt,
-                          unhealthy_partition_cnt);
+                LOG_DEBUG_F("query partition status success, but still have unhealthy partition, "
+                            "healthy_partition_count = {}, unhealthy_partition_count = {}",
+                            healthy_partition_cnt,
+                            unhealthy_partition_cnt);
                 sleep(1);
             } else
                 return true;
diff --git a/src/utils/api_utilities.h b/src/utils/api_utilities.h
index 1d6c0026e..e3c25d0a2 100644
--- a/src/utils/api_utilities.h
+++ b/src/utils/api_utilities.h
@@ -87,7 +87,6 @@ extern void dsn_coredump();
             dsn_logf(__FILENAME__, __FUNCTION__, __LINE__, level, __VA_ARGS__);                    \
     } while (false)
 
-#define LOG_DEBUG(...) dlog(LOG_LEVEL_DEBUG, __VA_ARGS__)
 #define LOG_INFO(...) dlog(LOG_LEVEL_INFO, __VA_ARGS__)
 #define LOG_WARNING(...) dlog(LOG_LEVEL_WARNING, __VA_ARGS__)
 #define LOG_ERROR(...) dlog(LOG_LEVEL_ERROR, __VA_ARGS__)
diff --git a/src/utils/token_bucket_throttling_controller.cpp b/src/utils/token_bucket_throttling_controller.cpp
index cbadc5f0f..8c1fd995e 100644
--- a/src/utils/token_bucket_throttling_controller.cpp
+++ b/src/utils/token_bucket_throttling_controller.cpp
@@ -19,6 +19,7 @@
 
 #include "api_utilities.h"
 #include "string_conv.h"
+#include "utils/fmt_logging.h"
 
 namespace dsn {
 namespace utils {
@@ -145,8 +146,9 @@ bool token_bucket_throttling_controller::transform_env_string(const std::string
         reject_size_value = 1;
         enabled = false;
 
-        LOG_DEBUG("token_bucket_throttling_controller doesn't support delay method, so throttling "
-                  "controller is disabled now");
+        LOG_DEBUG_F(
+            "token_bucket_throttling_controller doesn't support delay method, so throttling "
+            "controller is disabled now");
         return true;
     }
 
diff --git a/src/zookeeper/lock_struct.cpp b/src/zookeeper/lock_struct.cpp
index b77bbf109..8ef4ad52c 100644
--- a/src/zookeeper/lock_struct.cpp
+++ b/src/zookeeper/lock_struct.cpp
@@ -556,7 +556,7 @@ void lock_struct::after_create_locknode(lock_struct_ptr _this,
     __check_code(ec, allow_ec, 2, zerror(ec));
     __check_code(_this->_state, allow_state, 3, string_state(_this->_state));
 
-    LOG_DEBUG("after create seq and ephe node, error(%s), path(%s)", zerror(ec), path->c_str());
+    LOG_DEBUG_F("after create seq and ephe node, error({}), path({})", zerror(ec), *path);
     if (_this->_state == lock_state::cancelled || _this->_state == lock_state::expired) {
         LOG_INFO("current state(%s), ignore event create lockdir(%s)", _this->_lock_dir.c_str());
         if (ZOK == ec && _this->_state == lock_state::cancelled) {
diff --git a/src/zookeeper/zookeeper_session.cpp b/src/zookeeper/zookeeper_session.cpp
index ad7cacb32..1839b952b 100644
--- a/src/zookeeper/zookeeper_session.cpp
+++ b/src/zookeeper/zookeeper_session.cpp
@@ -335,9 +335,9 @@ void zookeeper_session::global_watcher(
 void zookeeper_session::global_string_completion(int rc, const char *name, const void *data)
 {
     COMPLETION_INIT(rc, data);
-    LOG_DEBUG("rc(%s), input path(%s)", zerror(rc), op_ctx->_input._path.c_str());
+    LOG_DEBUG_F("rc({}), input path({})", zerror(rc), op_ctx->_input._path);
     if (ZOK == rc && name != nullptr)
-        LOG_DEBUG("created path:%s", name);
+        LOG_DEBUG_F("created path: {}", name);
     output.create_op._created_path = name;
     op_ctx->_callback_function(op_ctx);
     release_ref(op_ctx);
@@ -347,7 +347,7 @@ void zookeeper_session::global_data_completion(
     int rc, const char *value, int value_length, const Stat *, const void *data)
 {
     COMPLETION_INIT(rc, data);
-    LOG_DEBUG("rc(%s), input path(%s)", zerror(rc), op_ctx->_input._path.c_str());
+    LOG_DEBUG_F("rc({}), input path({})", zerror(rc), op_ctx->_input._path);
     output.get_op.value_length = value_length;
     output.get_op.value = value;
     op_ctx->_callback_function(op_ctx);
@@ -357,7 +357,7 @@ void zookeeper_session::global_data_completion(
 void zookeeper_session::global_state_completion(int rc, const Stat *stat, const void *data)
 {
     COMPLETION_INIT(rc, data);
-    LOG_DEBUG("rc(%s), input path(%s)", zerror(rc), op_ctx->_input._path.c_str());
+    LOG_DEBUG_F("rc({}), input path({})", zerror(rc), op_ctx->_input._path);
     if (op_ctx->_optype == ZOO_EXISTS) {
         output.exists_op._node_stat = stat;
         op_ctx->_callback_function(op_ctx);
@@ -373,9 +373,9 @@ void zookeeper_session::global_strings_completion(int rc,
                                                   const void *data)
 {
     COMPLETION_INIT(rc, data);
-    LOG_DEBUG("rc(%s), input path(%s)", zerror(rc), op_ctx->_input._path.c_str());
+    LOG_DEBUG_F("rc({}), input path({})", zerror(rc), op_ctx->_input._path);
     if (rc == ZOK && strings != nullptr)
-        LOG_DEBUG("child count: %d", strings->count);
+        LOG_DEBUG_F("child count: {}", strings->count);
     output.getchildren_op.strings = strings;
     op_ctx->_callback_function(op_ctx);
     release_ref(op_ctx);
@@ -385,9 +385,9 @@ void zookeeper_session::global_void_completion(int rc, const void *data)
 {
     COMPLETION_INIT(rc, data);
     if (op_ctx->_optype == ZOO_DELETE)
-        LOG_DEBUG("rc(%s), input path( %s )", zerror(rc), op_ctx->_input._path.c_str());
+        LOG_DEBUG_F("rc({}), input path({})", zerror(rc), op_ctx->_input._path);
     else
-        LOG_DEBUG("rc(%s)", zerror(rc));
+        LOG_DEBUG_F("rc({})", zerror(rc));
     op_ctx->_callback_function(op_ctx);
     release_ref(op_ctx);
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]