This is an automated email from the ASF dual-hosted git repository.
laiyingchun pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-pegasus.git
The following commit(s) were added to refs/heads/master by this push:
new 3a4e1cbac refactor(log): use LOG_WARNING_F instead of LOG_WARNING (3/3) (#1319)
3a4e1cbac is described below
commit 3a4e1cbacc9afa2465ee90de5c9528d36f7c98aa
Author: WHBANG <[email protected]>
AuthorDate: Thu Jan 19 19:15:22 2023 +0800
refactor(log): use LOG_WARNING_F instead of LOG_WARNING (3/3) (#1319)
---
src/block_service/local/local_service.cpp | 31 ++++++------
src/client/partition_resolver_simple.cpp | 11 ++---
src/client_lib/pegasus_client_factory_impl.cpp | 2 +-
src/common/fs_manager.cpp | 7 +--
src/common/storage_serverlet.h | 8 ++--
src/failure_detector/failure_detector.cpp | 45 ++++++++---------
src/failure_detector/test/failure_detector.cpp | 2 +-
src/nfs/nfs_client_impl.cpp | 14 +++---
src/redis_protocol/proxy_lib/proxy_layer.cpp | 2 +-
src/redis_protocol/proxy_lib/redis_parser.cpp | 12 ++---
src/runtime/rpc/asio_net_provider.cpp | 16 +++----
src/runtime/rpc/asio_rpc_session.cpp | 18 +++----
src/runtime/rpc/network.cpp | 4 +-
src/runtime/rpc/rpc_engine.cpp | 44 ++++++++---------
src/runtime/security/client_negotiation.cpp | 4 +-
src/runtime/security/kinit_context.cpp | 2 +-
src/runtime/security/server_negotiation.cpp | 4 +-
src/runtime/task/task.cpp | 10 ++--
src/runtime/task/task_spec.cpp | 16 +++----
src/runtime/task/task_worker.cpp | 11 +++--
src/runtime/zlocks.cpp | 18 ++++---
src/server/available_detector.cpp | 5 +-
src/server/info_collector.cpp | 16 +++----
src/server/pegasus_server_impl.cpp | 56 +++++++++++-----------
src/server/result_writer.cpp | 14 +++---
src/shell/command_helper.h | 6 +--
src/test/pressure_test/main.cpp | 6 +--
src/utils/api_utilities.h | 1 -
src/utils/filesystem.cpp | 23 ++++-----
src/utils/utils.cpp | 10 ++--
.../distributed_lock_service_zookeeper.cpp | 5 +-
src/zookeeper/lock_struct.cpp | 35 +++++++-------
32 files changed, 219 insertions(+), 239 deletions(-)
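Every hunk below follows the same pattern: a printf-style LOG_WARNING(...) call becomes an fmtlib-style LOG_WARNING_F(...) call, which drops the .c_str()/.to_string() conversions and the type-sensitive specifiers such as %s, %u or PRIx64. As a rough illustration only -- the two macros here are hypothetical stand-ins built on printf and fmt::format, not the real rDSN logging macros:

// Illustrative stand-ins only -- not the actual rDSN logging macros.
#include <cinttypes>
#include <cstdio>
#include <string>
#include <fmt/format.h>  // assumed available; LOG_WARNING_F is fmt-based

#define FAKE_LOG_WARNING(fmt_, ...) std::printf(fmt_ "\n", ##__VA_ARGS__)
#define FAKE_LOG_WARNING_F(fmt_, ...) \
    std::printf("%s\n", fmt::format(fmt_, ##__VA_ARGS__).c_str())

int main()
{
    std::string path = "/tmp/replica";
    uint64_t trace_id = 0x1234abcd5678ef00ULL;

    // Old style: every argument must match a printf specifier exactly.
    FAKE_LOG_WARNING("remove %s failed, trace_id = %016" PRIx64, path.c_str(), trace_id);

    // New style: "{}" deduces the type, so std::string and integers pass through directly.
    FAKE_LOG_WARNING_F("remove {} failed, trace_id = {:#018x}", path, trace_id);
    return 0;
}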
diff --git a/src/block_service/local/local_service.cpp b/src/block_service/local/local_service.cpp
index ce4d73f57..b22fcc539 100644
--- a/src/block_service/local/local_service.cpp
+++ b/src/block_service/local/local_service.cpp
@@ -81,8 +81,7 @@ error_code local_service::initialize(const std::vector<std::string> &args)
LOG_INFO_F("initialize local block service succeed with empty root");
} else {
if (::dsn::utils::filesystem::directory_exists(_root)) {
- LOG_WARNING("old local block service root dir has already exist, path(%s)",
- _root.c_str());
+ LOG_WARNING_F("old local block service root dir has already exist, path({})", _root);
} else {
CHECK(::dsn::utils::filesystem::create_directory(_root),
"local block service create directory({}) fail",
@@ -265,7 +264,8 @@ error_code local_file_object::load_metadata()
std::string metadata_path = local_service::get_metafile(file_name());
std::ifstream is(metadata_path, std::ios::in);
if (!is.is_open()) {
- LOG_WARNING("load meta data from %s failed, err = %s",
utils::safe_strerror(errno).c_str());
+ LOG_WARNING_F(
+ "load meta data from {} failed, err = {}", metadata_path,
utils::safe_strerror(errno));
return ERR_FS_INTERNAL;
}
auto cleanup = dsn::defer([&is]() { is.close(); });
@@ -290,9 +290,8 @@ error_code local_file_object::store_metadata()
std::string metadata_path = local_service::get_metafile(file_name());
std::ofstream os(metadata_path, std::ios::out | std::ios::trunc);
if (!os.is_open()) {
- LOG_WARNING("store to metadata file %s failed, err=%s",
- metadata_path.c_str(),
- utils::safe_strerror(errno).c_str());
+ LOG_WARNING_F(
+ "store to metadata file {} failed, err={}", metadata_path,
utils::safe_strerror(errno));
return ERR_FS_INTERNAL;
}
auto cleanup = dsn::defer([&os]() { os.close(); });
@@ -376,7 +375,7 @@ dsn::task_ptr local_file_object::read(const read_request &req,
resp.err = ERR_OBJECT_NOT_FOUND;
} else {
if ((resp.err = load_metadata()) != ERR_OK) {
- LOG_WARNING("load meta data of %s failed", file_name().c_str());
+ LOG_WARNING_F("load meta data of {} failed", file_name());
+ LOG_WARNING_F("load meta data of {} failed", file_name());
} else {
int64_t file_sz = _size;
int64_t total_sz = 0;
@@ -423,18 +422,18 @@ dsn::task_ptr local_file_object::upload(const upload_request &req,
resp.err = ERR_OK;
std::ifstream fin(req.input_local_name, std::ios_base::in);
if (!fin.is_open()) {
- LOG_WARNING("open source file %s for read failed, err(%s)",
- req.input_local_name.c_str(),
- utils::safe_strerror(errno).c_str());
+ LOG_WARNING_F("open source file {} for read failed, err({})",
+ req.input_local_name,
+ utils::safe_strerror(errno));
resp.err = ERR_FILE_OPERATION_FAILED;
}
utils::filesystem::create_file(file_name());
std::ofstream fout(file_name(), std::ios_base::out | std::ios_base::trunc);
if (!fout.is_open()) {
- LOG_WARNING("open target file %s for write failed, err(%s)",
- file_name().c_str(),
- utils::safe_strerror(errno).c_str());
+ LOG_WARNING_F("open target file {} for write failed, err({})",
+ file_name(),
+ utils::safe_strerror(errno));
resp.err = ERR_FS_INTERNAL;
}
@@ -542,9 +541,9 @@ dsn::task_ptr local_file_object::download(const download_request &req,
_size = total_sz;
if ((resp.err = utils::filesystem::md5sum(target_file, _md5_value)) != ERR_OK) {
- LOG_WARNING("download %s failed when calculate the md5sum of %s",
- file_name().c_str(),
- target_file.c_str());
+ LOG_WARNING_F("download {} failed when calculate the md5sum of {}",
+ file_name(),
+ target_file);
} else {
_has_meta_synced = true;
resp.file_md5 = _md5_value;
diff --git a/src/client/partition_resolver_simple.cpp b/src/client/partition_resolver_simple.cpp
index 139f5fc55..f9e2467ac 100644
--- a/src/client/partition_resolver_simple.cpp
+++ b/src/client/partition_resolver_simple.cpp
@@ -269,18 +269,17 @@ void partition_resolver_simple::query_config_reply(error_code err,
if (_app_id != -1 && _app_id != resp.app_id) {
LOG_WARNING_F(
"app id is changed (mostly the app was removed and created
with the same "
- "name), local Vs remote: {} vs {} ",
+ "name), local vs remote: {} vs {} ",
_app_id,
resp.app_id);
}
if (_app_partition_count != -1 && _app_partition_count != resp.partition_count &&
_app_partition_count * 2 != resp.partition_count &&
_app_partition_count != resp.partition_count * 2) {
- LOG_WARNING_F(
- "partition count is changed (mostly the app was removed
and created with "
- "the same name), local Vs remote: %u vs %u ",
- _app_partition_count,
- resp.partition_count);
+ LOG_WARNING_F("partition count is changed (mostly the app was
removed and created "
+ "with the same name), local vs remote: {} vs {}
",
+ _app_partition_count,
+ resp.partition_count);
}
_app_id = resp.app_id;
_app_partition_count = resp.partition_count;
diff --git a/src/client_lib/pegasus_client_factory_impl.cpp b/src/client_lib/pegasus_client_factory_impl.cpp
index b4f6b4a1c..b68b8b9ac 100644
--- a/src/client_lib/pegasus_client_factory_impl.cpp
+++ b/src/client_lib/pegasus_client_factory_impl.cpp
@@ -33,7 +33,7 @@ bool pegasus_client_factory_impl::initialize(const char *config_file)
CHECK(is_initialized, "rdsn engine not started, please specify a valid config file");
} else {
if (is_initialized) {
- LOG_WARNING("rdsn engine already started, ignore the config file
'%s'", config_file);
+ LOG_WARNING_F("rdsn engine already started, ignore the config file
'{}'", config_file);
} else {
// use config file to run
char exe[] = "client";
diff --git a/src/common/fs_manager.cpp b/src/common/fs_manager.cpp
index 783e77c5b..1215a088c 100644
--- a/src/common/fs_manager.cpp
+++ b/src/common/fs_manager.cpp
@@ -212,11 +212,8 @@ void fs_manager::add_replica(const gpid &pid, const std::string &pid_dir)
std::set<dsn::gpid> &replicas_for_app = n->holding_replicas[pid.get_app_id()];
auto result = replicas_for_app.emplace(pid);
if (!result.second) {
- LOG_WARNING("%s: gpid(%d.%d) already in the dir_node(%s)",
- dsn_primary_address().to_string(),
- pid.get_app_id(),
- pid.get_partition_index(),
- n->tag.c_str());
+ LOG_WARNING_F(
+ "{}: gpid({}) already in the dir_node({})",
dsn_primary_address(), pid, n->tag);
} else {
LOG_INFO_F("{}: add gpid({}) to dir_node({})",
dsn_primary_address(), pid, n->tag);
}
diff --git a/src/common/storage_serverlet.h b/src/common/storage_serverlet.h
index a06709d41..bef086946 100644
--- a/src/common/storage_serverlet.h
+++ b/src/common/storage_serverlet.h
@@ -129,10 +129,10 @@ protected:
if (ptr != nullptr) {
(*ptr)(static_cast<T *>(this), request);
} else {
- LOG_WARNING("recv message with unhandled rpc name %s from %s,
trace_id = %016" PRIx64,
- t.to_string(),
- request->header->from_address.to_string(),
- request->header->trace_id);
+ LOG_WARNING_F("recv message with unhandled rpc name {} from {},
trace_id = {:#018x} ",
+ t,
+ request->header->from_address,
+ request->header->trace_id);
dsn_rpc_reply(request->create_response(),
::dsn::ERR_HANDLER_NOT_FOUND);
}
return 0;
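A note on the trace_id format change in the hunk above (and in the rpc_engine.cpp hunks further down): "%016" PRIx64 prints 16 zero-padded hex digits with no prefix, while the fmt spec {:#018x} adds a "0x" prefix and widens the field to 18 so the digit count stays 16. A minimal sketch of that behavior, assuming standard fmtlib semantics:

#include <cstdint>
#include <fmt/format.h>

int main()
{
    uint64_t trace_id = 0xabcULL;
    // printf style used before this patch: "%016" PRIx64 -> "0000000000000abc"
    // fmt style used after this patch: '#' adds the "0x" prefix, width 18 = 2 + 16 digits.
    fmt::print("{:#018x}\n", trace_id);  // prints "0x0000000000000abc"
    return 0;
}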
diff --git a/src/failure_detector/failure_detector.cpp b/src/failure_detector/failure_detector.cpp
index c042bbbbd..231bb08ab 100644
--- a/src/failure_detector/failure_detector.cpp
+++ b/src/failure_detector/failure_detector.cpp
@@ -150,9 +150,8 @@ bool failure_detector::switch_master(::dsn::rpc_address from,
auto it2 = _masters.find(to);
if (it != _masters.end()) {
if (it2 != _masters.end()) {
- LOG_WARNING("switch master failed as both are already registered, from[%s], to[%s]",
- from.to_string(),
- to.to_string());
+ LOG_WARNING_F(
+ "switch master failed as both are already registered, from[{}], to[{}]", from, to);
return false;
}
@@ -172,9 +171,8 @@ bool failure_detector::switch_master(::dsn::rpc_address from,
LOG_INFO_F("switch master successfully, from[{}], to[{}]", from, to);
} else {
- LOG_WARNING("switch master failed as from node is not registered yet, from[%s], to[%s]",
- from.to_string(),
- to.to_string());
+ LOG_WARNING_F(
+ "switch master failed as from node is not registered yet, from[{}], to[{}]", from, to);
return false;
}
return true;
@@ -312,8 +310,8 @@ void failure_detector::set_allow_list(const std::vector<std::string> &replica_ad
for (auto &addr : replica_addrs) {
rpc_address node;
if (!node.from_string_ipv4(addr.c_str())) {
- LOG_WARNING("replica_white_list has invalid ip %s, the allow list won't be modified",
- addr.c_str());
+ LOG_WARNING_F("replica_white_list has invalid ip {}, the allow list won't be modified",
+ addr);
return;
}
nodes.push_back(node);
@@ -356,7 +354,7 @@ void failure_detector::on_ping_internal(const beacon_msg &beacon, /*out*/ beacon
if (itr == _workers.end()) {
// if is a new worker, check allow list first if need
if (_use_allow_list && _allow_list.find(node) == _allow_list.end()) {
- LOG_WARNING("new worker[%s] is rejected", node.to_string());
+ LOG_WARNING_F("new worker[{}] is rejected", node);
ack.allowed = false;
return;
}
@@ -408,29 +406,29 @@ bool failure_detector::end_ping_internal(::dsn::error_code err, const beacon_ack
auto node = ack.this_node;
if (err != ERR_OK) {
- LOG_WARNING("ping master(%s) failed, timeout_ms = %u, err = %s",
- node.to_string(),
- _beacon_timeout_milliseconds,
- err.to_string());
+ LOG_WARNING_F("ping master({}) failed, timeout_ms = {}, err = {}",
+ node,
+ _beacon_timeout_milliseconds,
+ err);
_recent_beacon_fail_count->increment();
}
master_map::iterator itr = _masters.find(node);
if (itr == _masters.end()) {
- LOG_WARNING("received beacon ack without corresponding master, ignore
it, "
- "remote_master[%s], local_worker[%s]",
- node.to_string(),
- dsn_primary_address().to_string());
+ LOG_WARNING_F("received beacon ack without corresponding master,
ignore it, "
+ "remote_master[{}], local_worker[{}]",
+ node,
+ dsn_primary_address());
return false;
}
master_record &record = itr->second;
if (!ack.allowed) {
- LOG_WARNING("worker rejected, stop sending beacon message, "
- "remote_master[%s], local_worker[%s]",
- node.to_string(),
- dsn_primary_address().to_string());
+ LOG_WARNING_F(
+ "worker rejected, stop sending beacon message, remote_master[{}],
local_worker[{}]",
+ node,
+ dsn_primary_address());
record.rejected = true;
record.send_beacon_timer->cancel(true);
return false;
@@ -451,9 +449,8 @@ bool failure_detector::end_ping_internal(::dsn::error_code err, const beacon_ack
// if ack is not from master meta, worker should not update its last send time
if (!ack.is_master) {
- LOG_WARNING("node[%s] is not master, ack.primary_node[%s] is real master",
- node.to_string(),
- ack.primary_node.to_string());
+ LOG_WARNING_F(
+ "node[{}] is not master, ack.primary_node[{}] is real master", node, ack.primary_node);
return true;
}
diff --git a/src/failure_detector/test/failure_detector.cpp b/src/failure_detector/test/failure_detector.cpp
index b7b497150..ff2de6d66 100644
--- a/src/failure_detector/test/failure_detector.cpp
+++ b/src/failure_detector/test/failure_detector.cpp
@@ -328,7 +328,7 @@ void clear(test_worker *worker, std::vector<test_master *> masters)
void finish(test_worker *worker, test_master *master, int master_index)
{
- LOG_WARNING("start to finish");
+ LOG_WARNING_F("start to finish");
std::atomic_int wait_count;
wait_count.store(2);
worker->fd()->when_disconnected(
diff --git a/src/nfs/nfs_client_impl.cpp b/src/nfs/nfs_client_impl.cpp
index 41e4140cd..515796b8d 100644
--- a/src/nfs/nfs_client_impl.cpp
+++ b/src/nfs/nfs_client_impl.cpp
@@ -334,13 +334,13 @@ void nfs_client_impl::end_copy(::dsn::error_code err,
if (!fc->user_req->is_finished) {
if (reqc->retry_count > 0) {
- LOG_WARNING("{nfs_service} remote copy failed, source = %s,
dir = %s, file = %s, "
- "err = %s, retry_count = %d",
- fc->user_req->file_size_req.source.to_string(),
- fc->user_req->file_size_req.source_dir.c_str(),
- fc->file_name.c_str(),
- err.to_string(),
- reqc->retry_count);
+ LOG_WARNING_F("[nfs_service] remote copy failed, source = {},
dir = {}, file = {}, "
+ "err = {}, retry_count = {}",
+ fc->user_req->file_size_req.source,
+ fc->user_req->file_size_req.source_dir,
+ fc->file_name,
+ err,
+ reqc->retry_count);
// retry copy
reqc->retry_count--;
diff --git a/src/redis_protocol/proxy_lib/proxy_layer.cpp b/src/redis_protocol/proxy_lib/proxy_layer.cpp
index 685860a85..e6db673d8 100644
--- a/src/redis_protocol/proxy_lib/proxy_layer.cpp
+++ b/src/redis_protocol/proxy_lib/proxy_layer.cpp
@@ -93,7 +93,7 @@ void proxy_stub::remove_session(dsn::rpc_address remote_address)
::dsn::zauto_write_lock l(_lock);
auto iter = _sessions.find(remote_address);
if (iter == _sessions.end()) {
- LOG_WARNING("%s has been removed from proxy stub", remote_address.to_string());
+ LOG_WARNING_F("{} has been removed from proxy stub", remote_address);
return;
}
LOG_INFO_F("remove {} from proxy stub", remote_address);
diff --git a/src/redis_protocol/proxy_lib/redis_parser.cpp b/src/redis_protocol/proxy_lib/redis_parser.cpp
index a81ae324f..ab0f49a65 100644
--- a/src/redis_protocol/proxy_lib/redis_parser.cpp
+++ b/src/redis_protocol/proxy_lib/redis_parser.cpp
@@ -899,7 +899,7 @@ void redis_parser::counter_internal(message_entry &entry)
if (dsn::utils::iequals(command, "INCR") || dsn::utils::iequals(command, "DECR")) {
if (entry.request.sub_requests.size() != 2) {
LOG_WARNING_F("{}: command {} seqid({}) with invalid arguments count: {}",
- _remote_address.to_string(),
+ _remote_address,
command,
entry.sequence_id,
entry.request.sub_requests.size());
@@ -909,7 +909,7 @@ void redis_parser::counter_internal(message_entry &entry)
} else if (dsn::utils::iequals(command, "INCRBY") || dsn::utils::iequals(command, "DECRBY")) {
if (entry.request.sub_requests.size() != 3) {
LOG_WARNING_F("{}: command {} seqid({}) with invalid arguments count: {}",
- _remote_address.to_string(),
+ _remote_address,
command,
entry.sequence_id,
entry.request.sub_requests.size());
@@ -918,7 +918,7 @@ void redis_parser::counter_internal(message_entry &entry)
}
if (!dsn::buf2int64(entry.request.sub_requests[2].data, increment)) {
LOG_WARNING_F("{}: command {} seqid({}) with invalid 'increment':
{}",
- _remote_address.to_string(),
+ _remote_address,
command,
entry.sequence_id,
entry.request.sub_requests[2].data.to_string());
@@ -938,7 +938,7 @@ void redis_parser::counter_internal(message_entry &entry)
::dsn::error_code ec, dsn::message_ex *, dsn::message_ex *response) {
if (_is_session_reset.load(std::memory_order_acquire)) {
LOG_WARNING_F("{}: command {} seqid({}) got reply, but session has
reset",
- _remote_address.to_string(),
+ _remote_address,
command,
entry.sequence_id);
return;
@@ -946,10 +946,10 @@ void redis_parser::counter_internal(message_entry &entry)
if (::dsn::ERR_OK != ec) {
LOG_WARNING_F("{}: command {} seqid({}) got reply with error = {}",
- _remote_address.to_string(),
+ _remote_address,
command,
entry.sequence_id,
- ec.to_string());
+ ec);
simple_error_reply(entry, ec.to_string());
} else {
::dsn::apps::incr_response incr_resp;
diff --git a/src/runtime/rpc/asio_net_provider.cpp b/src/runtime/rpc/asio_net_provider.cpp
index d533c9aa2..ff6cd6517 100644
--- a/src/runtime/rpc/asio_net_provider.cpp
+++ b/src/runtime/rpc/asio_net_provider.cpp
@@ -164,10 +164,10 @@ void asio_network_provider::do_accept()
// when server connection threshold is hit, close the session, otherwise accept it
if (check_if_conn_threshold_exceeded(s->remote_address())) {
- LOG_WARNING("close rpc connection from %s to %s due to
hitting server "
- "connection threshold per ip",
- s->remote_address().to_string(),
- address().to_string());
+ LOG_WARNING_F("close rpc connection from {} to {} due to
hitting server "
+ "connection threshold per ip",
+ s->remote_address(),
+ address());
s->close();
} else {
on_server_session_accepted(s);
@@ -210,10 +210,10 @@ void asio_udp_provider::send_message(message_ex *request)
ep,
[=](const boost::system::error_code &error, std::size_t bytes_transferred) {
if (error) {
- LOG_WARNING("send udp packet to ep %s:%d failed, message = %s",
- ep.address().to_string().c_str(),
- ep.port(),
- error.message().c_str());
+ LOG_WARNING_F("send udp packet to ep {}:{} failed, message =
{}",
+ ep.address(),
+ ep.port(),
+ error.message());
// we do not handle failure here, rpc matcher would handle
timeouts
}
});
diff --git a/src/runtime/rpc/asio_rpc_session.cpp b/src/runtime/rpc/asio_rpc_session.cpp
index d2e4f8879..6fe886244 100644
--- a/src/runtime/rpc/asio_rpc_session.cpp
+++ b/src/runtime/rpc/asio_rpc_session.cpp
@@ -37,28 +37,28 @@ void asio_rpc_session::set_options()
boost::asio::socket_base::send_buffer_size option, option2(16 * 1024 * 1024);
_socket->get_option(option, ec);
if (ec)
- LOG_WARNING("asio socket get option failed, error = %s", ec.message().c_str());
+ LOG_WARNING_F("asio socket get option failed, error = {}", ec.message());
int old = option.value();
_socket->set_option(option2, ec);
if (ec)
- LOG_WARNING("asio socket set option failed, error = %s", ec.message().c_str());
+ LOG_WARNING_F("asio socket set option failed, error = {}", ec.message());
_socket->get_option(option, ec);
if (ec)
- LOG_WARNING("asio socket get option failed, error = %s", ec.message().c_str());
+ LOG_WARNING_F("asio socket get option failed, error = {}", ec.message());
LOG_DEBUG_F(
"boost asio send buffer size is {}, set as 16MB, now is {}", old, option.value());
boost::asio::socket_base::receive_buffer_size option3, option4(16 * 1024 * 1024);
_socket->get_option(option3, ec);
if (ec)
- LOG_WARNING("asio socket get option failed, error = %s", ec.message().c_str());
+ LOG_WARNING_F("asio socket get option failed, error = {}", ec.message());
old = option3.value();
_socket->set_option(option4, ec);
if (ec)
- LOG_WARNING("asio socket set option failed, error = %s", ec.message().c_str());
+ LOG_WARNING_F("asio socket set option failed, error = {}", ec.message());
_socket->get_option(option3, ec);
if (ec)
- LOG_WARNING("asio socket get option failed, error = %s", ec.message().c_str());
+ LOG_WARNING_F("asio socket get option failed, error = {}", ec.message());
LOG_DEBUG_F(
"boost asio recv buffer size is {}, set as 16MB, now is {}", old, option.value());
@@ -72,7 +72,7 @@ void asio_rpc_session::set_options()
// * decrease the qps (negative)
_socket->set_option(boost::asio::ip::tcp::no_delay(true), ec);
if (ec)
- LOG_WARNING("asio socket set option failed, error = %s",
ec.message().c_str());
+ LOG_WARNING_F("asio socket set option failed, error = {}",
ec.message());
LOG_DEBUG_F("boost asio set no_delay = true");
}
}
@@ -169,10 +169,10 @@ void asio_rpc_session::close()
boost::system::error_code ec;
_socket->shutdown(boost::asio::socket_base::shutdown_type::shutdown_both, ec);
if (ec)
- LOG_WARNING("asio socket shutdown failed, error = %s", ec.message().c_str());
+ LOG_WARNING_F("asio socket shutdown failed, error = {}", ec.message());
_socket->close(ec);
if (ec)
- LOG_WARNING("asio socket close failed, error = %s", ec.message().c_str());
+ LOG_WARNING_F("asio socket close failed, error = {}", ec.message());
}
void asio_rpc_session::connect()
diff --git a/src/runtime/rpc/network.cpp b/src/runtime/rpc/network.cpp
index 1f231ef33..09cfff05b 100644
--- a/src/runtime/rpc/network.cpp
+++ b/src/runtime/rpc/network.cpp
@@ -679,8 +679,8 @@ void connection_oriented_network::on_server_session_accepted(rpc_session_ptr &s)
// nothing to do
} else {
pr.first->second = s;
- LOG_WARNING("server session already exists, remote_client = %s, preempted",
- s->remote_address().to_string());
+ LOG_WARNING_F("server session already exists, remote_client = {}, preempted",
+ s->remote_address());
}
ip_count = (int)_servers.size();
diff --git a/src/runtime/rpc/rpc_engine.cpp b/src/runtime/rpc/rpc_engine.cpp
index d882d2a13..cd8e630f4 100644
--- a/src/runtime/rpc/rpc_engine.cpp
+++ b/src/runtime/rpc/rpc_engine.cpp
@@ -457,9 +457,8 @@ error_code rpc_engine::start(const service_app_spec &aspec)
factory = it1->second.factory_name;
blk_size = it1->second.message_buffer_block_size;
} else {
- LOG_WARNING(
- "network client for channel %s not registered, assuming
not used further",
- c.to_string());
+ LOG_WARNING_F(
+ "network client for channel {} not registered, assuming
not used further", c);
continue;
}
@@ -502,10 +501,10 @@ error_code rpc_engine::start(const service_app_spec &aspec)
(*pnets)[sp.second.channel].reset(net);
- LOG_WARNING("[%s] network server started at port %u, channel = %s, ...",
- node()->full_name(),
- (uint32_t)(port),
- sp.second.channel.to_string());
+ LOG_WARNING_F("[{}] network server started at port {}, channel = {}, ...",
+ node()->full_name(),
+ port,
+ sp.second.channel);
}
_local_primary_address = _client_nets[NET_HDR_DSN][0]->address();
@@ -534,11 +533,10 @@ bool rpc_engine::unregister_rpc_handler(dsn::task_code rpc_code)
void rpc_engine::on_recv_request(network *net, message_ex *msg, int delay_ms)
{
if (!_is_serving) {
- LOG_WARNING(
- "recv message with rpc name %s from %s when rpc engine is not serving, trace_id = "
- "%" PRIu64,
+ LOG_WARNING_F(
+ "recv message with rpc name {} from {} when rpc engine is not serving, trace_id = {}",
msg->header->rpc_name,
- msg->header->from_address.to_string(),
+ msg->header->from_address,
msg->header->trace_id);
CHECK_EQ_MSG(msg->get_count(), 0, "request should not be referenced by anybody so far");
@@ -584,10 +582,10 @@ void rpc_engine::on_recv_request(network *net, message_ex *msg, int delay_ms)
tsk->release_ref();
}
} else {
- LOG_WARNING("recv message with unhandled rpc name %s from %s, trace_id = %016" PRIx64,
- msg->header->rpc_name,
- msg->header->from_address.to_string(),
- msg->header->trace_id);
+ LOG_WARNING_F("recv message with unhandled rpc name {} from {}, trace_id = {:#018x}",
+ msg->header->rpc_name,
+ msg->header->from_address,
+ msg->header->trace_id);
CHECK_EQ_MSG(msg->get_count(), 0, "request should not be referenced by anybody so far");
msg->add_ref();
@@ -595,10 +593,10 @@ void rpc_engine::on_recv_request(network *net, message_ex *msg, int delay_ms)
msg->release_ref();
}
} else {
- LOG_WARNING("recv message with unknown rpc name %s from %s, trace_id = %016" PRIx64,
- msg->header->rpc_name,
- msg->header->from_address.to_string(),
- msg->header->trace_id);
+ LOG_WARNING_F("recv message with unknown rpc name {} from {}, trace_id = {:#018x}",
+ msg->header->rpc_name,
+ msg->header->from_address,
+ msg->header->trace_id);
CHECK_EQ_MSG(msg->get_count(), 0, "request should not be referenced by anybody so far");
msg->add_ref();
@@ -652,10 +650,10 @@ void rpc_engine::call_ip(rpc_address addr,
"from address must be set before call call_ip");
while (!request->dl.is_alone()) {
- LOG_WARNING("msg request %s (trace_id = %016" PRIx64
- ") is in sending queue, try to pick out ...",
- request->header->rpc_name,
- request->header->trace_id);
+ LOG_WARNING_F(
+ "msg request {} (trace_id = {:#018x}) is in sending queue, try to
pick out ...",
+ request->header->rpc_name,
+ request->header->trace_id);
auto s = request->io_session;
if (s.get() != nullptr) {
s->cancel(request);
diff --git a/src/runtime/security/client_negotiation.cpp b/src/runtime/security/client_negotiation.cpp
index 30901b1e7..ab741a59b 100644
--- a/src/runtime/security/client_negotiation.cpp
+++ b/src/runtime/security/client_negotiation.cpp
@@ -126,7 +126,7 @@ void client_negotiation::on_mechanism_selected(const negotiation_response &resp)
if (!err_s.is_ok()) {
LOG_WARNING_F("{}: initialize sasl client failed, error = {}, reason = {}",
_name,
- err_s.code().to_string(),
+ err_s.code(),
err_s.description());
fail_negotiation();
return;
@@ -141,7 +141,7 @@ void client_negotiation::on_mechanism_selected(const negotiation_response &resp)
} else {
LOG_WARNING_F("{}: start sasl client failed, error = {}, reason = {}",
_name,
- err_s.code().to_string(),
+ err_s.code(),
err_s.description());
fail_negotiation();
}
diff --git a/src/runtime/security/kinit_context.cpp b/src/runtime/security/kinit_context.cpp
index c617c1f7f..810639947 100644
--- a/src/runtime/security/kinit_context.cpp
+++ b/src/runtime/security/kinit_context.cpp
@@ -268,7 +268,7 @@ void kinit_context::schedule_renew_credentials()
get_credentials();
schedule_renew_credentials();
} else if (err == boost::system::errc::operation_canceled) {
- LOG_WARNING("the renew credentials timer is cancelled");
+ LOG_WARNING_F("the renew credentials timer is cancelled");
} else {
CHECK(false, "unhandled error({})", err.message());
}
diff --git a/src/runtime/security/server_negotiation.cpp b/src/runtime/security/server_negotiation.cpp
index af6ec36a1..16586b7c2 100644
--- a/src/runtime/security/server_negotiation.cpp
+++ b/src/runtime/security/server_negotiation.cpp
@@ -92,7 +92,7 @@ void server_negotiation::on_select_mechanism(negotiation_rpc rpc)
if (!err_s.is_ok()) {
LOG_WARNING_F("{}: server initialize sasl failed, error = {}, msg = {}",
_name,
- err_s.code().to_string(),
+ err_s.code(),
err_s.description());
fail_negotiation();
return;
@@ -147,7 +147,7 @@ void server_negotiation::do_challenge(negotiation_rpc rpc, error_s err_s, const
} else {
LOG_WARNING_F("{}: retrive user name failed: with err = {}, msg = {}",
_name,
- retrive_err.code().to_string(),
+ retrive_err.code(),
retrive_err.description());
fail_negotiation();
}
diff --git a/src/runtime/task/task.cpp b/src/runtime/task/task.cpp
index 0ceed65d5..556f3a003 100644
--- a/src/runtime/task/task.cpp
+++ b/src/runtime/task/task.cpp
@@ -251,11 +251,11 @@ static void check_wait_task(task *waitee)
task::get_current_worker()->pool_spec().worker_count > 1)
return;
- LOG_WARNING("task %s waits for another task %s sharing the same thread
pool "
- "- will lead to deadlocks easily (e.g., when worker_count = 1
or when the pool "
- "is partitioned)",
- task::get_current_task()->spec().code.to_string(),
- waitee->spec().code.to_string());
+ LOG_WARNING_F("task {} waits for another task {} sharing the same thread
pool "
+ "- will lead to deadlocks easily (e.g., when worker_count =
1 or when the pool "
+ "is partitioned)",
+ task::get_current_task()->code(),
+ waitee->code());
}
bool task::wait_on_cancel()
diff --git a/src/runtime/task/task_spec.cpp b/src/runtime/task/task_spec.cpp
index 9cc1762fc..06846e8ef 100644
--- a/src/runtime/task/task_spec.cpp
+++ b/src/runtime/task/task_spec.cpp
@@ -84,19 +84,19 @@ void task_spec::register_task_code(task_code code,
enum_to_string(spec->type));
if (spec->priority != pri) {
- LOG_WARNING("overwrite priority for task %s from %s to %s",
- code.to_string(),
- enum_to_string(spec->priority),
- enum_to_string(pri));
+ LOG_WARNING_F("overwrite priority for task {} from {} to {}",
+ code,
+ enum_to_string(spec->priority),
+ enum_to_string(pri));
spec->priority = pri;
}
if (spec->pool_code != pool) {
if (spec->pool_code != THREAD_POOL_INVALID) {
- LOG_WARNING("overwrite default thread pool for task %s from %s
to %s",
- code.to_string(),
- spec->pool_code.to_string(),
- pool.to_string());
+ LOG_WARNING_F("overwrite default thread pool for task {} from
{} to {}",
+ code,
+ spec->pool_code,
+ pool);
}
spec->pool_code = pool;
}
diff --git a/src/runtime/task/task_worker.cpp b/src/runtime/task/task_worker.cpp
index 39339dac9..f19da169e 100644
--- a/src/runtime/task/task_worker.cpp
+++ b/src/runtime/task/task_worker.cpp
@@ -30,6 +30,7 @@
#include <sstream>
#include "utils/process_utils.h"
+#include "utils/safe_strerror_posix.h"
#include "utils/smart_pointers.h"
#include "task_engine.h"
@@ -103,7 +104,8 @@ void task_worker::set_name(const char *name)
#endif // defined(__linux__)
// We expect EPERM failures in sandboxed processes, just ignore those.
if (err < 0 && errno != EPERM) {
- LOG_WARNING("Fail to set pthread name. err = %d", err);
+ LOG_WARNING_F(
+ "Fail to set pthread name: err = {}, msg = {}", err,
utils::safe_strerror(errno));
}
}
@@ -128,7 +130,9 @@ void task_worker::set_priority(worker_priority_t pri)
succ = false;
}
if (!succ) {
- LOG_WARNING("You may need priviledge to set thread priority. errno =
%d", errno);
+ LOG_WARNING_F("You may need priviledge to set thread priority: errno =
{}, msg = {}",
+ errno,
+ utils::safe_strerror(errno));
}
}
@@ -159,7 +163,8 @@ void task_worker::set_affinity(uint64_t affinity)
err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
if (err != 0) {
- LOG_WARNING("Fail to set thread affinity. err = %d", err);
+ LOG_WARNING_F(
+ "Fail to set thread affinity: err = {}, msg = {}", err,
utils::safe_strerror(errno));
}
#endif // defined(__linux__)
}
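The task_worker.cpp hunks above also enrich the messages: the raw err/errno value is now paired with its text via utils::safe_strerror (presumably a thread-safe strerror wrapper returning std::string, hence the new utils/safe_strerror_posix.h include). A small stand-alone sketch of the same idea, with std::strerror standing in for safe_strerror:

#include <cerrno>
#include <cstring>
#include <fmt/format.h>

int main()
{
    errno = EPERM;  // pretend a priority-setting syscall just failed
    int err = errno;
    // Same shape as the patched log line; std::strerror stands in for utils::safe_strerror.
    fmt::print("You may need privilege to set thread priority: errno = {}, msg = {}\n",
               err,
               std::strerror(err));
    return 0;
}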
diff --git a/src/runtime/zlocks.cpp b/src/runtime/zlocks.cpp
index 378aea6a1..f9f08cd7f 100644
--- a/src/runtime/zlocks.cpp
+++ b/src/runtime/zlocks.cpp
@@ -47,22 +47,20 @@ __thread int zlock_shared_count;
void check_wait_safety()
{
if (zlock_exclusive_count + zlock_shared_count > 0) {
- LOG_WARNING(
- "wait inside locks may lead to deadlocks - current thread owns %u
exclusive locks "
- "and %u shared locks now.",
- zlock_exclusive_count,
- zlock_shared_count);
+ LOG_WARNING_F("wait inside locks may lead to deadlocks - current
thread owns {} exclusive "
+ "locks and {} shared locks now.",
+ zlock_exclusive_count,
+ zlock_shared_count);
}
}
void check_dangling_lock()
{
if (zlock_exclusive_count + zlock_shared_count > 0) {
- LOG_WARNING(
- "locks should not be hold at this point - current thread owns %u
exclusive locks and "
- "%u shared locks now.",
- zlock_exclusive_count,
- zlock_shared_count);
+ LOG_WARNING_F("locks should not be hold at this point - current thread
owns {} exclusive "
+ "locks and {} shared locks now.",
+ zlock_exclusive_count,
+ zlock_shared_count);
}
}
} // namespace lock_checker
diff --git a/src/server/available_detector.cpp b/src/server/available_detector.cpp
index 86bcea5f2..68297524f 100644
--- a/src/server/available_detector.cpp
+++ b/src/server/available_detector.cpp
@@ -260,9 +260,8 @@ bool available_detector::generate_hash_keys()
}
return true;
} else {
- LOG_WARNING("Get partition count of table '%s' on cluster '%s' failed",
- _app_name.c_str(),
- _cluster_name.c_str());
+ LOG_WARNING_F(
+ "Get partition count of table '{}' on cluster '{}' failed",
_app_name, _cluster_name);
return false;
}
}
diff --git a/src/server/info_collector.cpp b/src/server/info_collector.cpp
index 7edfbfa79..1cc830284 100644
--- a/src/server/info_collector.cpp
+++ b/src/server/info_collector.cpp
@@ -261,10 +261,10 @@ void info_collector::on_capacity_unit_stat(int remaining_retry_count)
std::vector<node_capacity_unit_stat> nodes_stat;
if (!get_capacity_unit_stat(_shell_context.get(), nodes_stat)) {
if (remaining_retry_count > 0) {
- LOG_WARNING("get capacity unit stat failed, remaining_retry_count = %d, "
- "wait %u seconds to retry",
- remaining_retry_count,
- _capacity_unit_retry_wait_seconds);
+ LOG_WARNING_F("get capacity unit stat failed, remaining_retry_count = {}, "
+ "wait {} seconds to retry",
+ remaining_retry_count,
+ _capacity_unit_retry_wait_seconds);
::dsn::tasking::enqueue(LPC_PEGASUS_CAPACITY_UNIT_STAT_TIMER,
&_tracker,
[=] { on_capacity_unit_stat(remaining_retry_count - 1); },
@@ -308,10 +308,10 @@ void info_collector::on_storage_size_stat(int remaining_retry_count)
app_storage_size_stat st_stat;
if (!get_storage_size_stat(_shell_context.get(), st_stat)) {
if (remaining_retry_count > 0) {
- LOG_WARNING("get storage size stat failed, remaining_retry_count = %d, "
- "wait %u seconds to retry",
- remaining_retry_count,
- _storage_size_retry_wait_seconds);
+ LOG_WARNING_F("get storage size stat failed, remaining_retry_count = {}, wait {} "
+ "seconds to retry",
+ remaining_retry_count,
+ _storage_size_retry_wait_seconds);
::dsn::tasking::enqueue(LPC_PEGASUS_STORAGE_SIZE_STAT_TIMER,
&_tracker,
[=] { on_storage_size_stat(remaining_retry_count - 1); },
diff --git a/src/server/pegasus_server_impl.cpp b/src/server/pegasus_server_impl.cpp
index 5d3c172be..5dc29b475 100644
--- a/src/server/pegasus_server_impl.cpp
+++ b/src/server/pegasus_server_impl.cpp
@@ -152,7 +152,7 @@ void pegasus_server_impl::gc_checkpoints(bool force_reserve_one)
}
time_t tm;
if (!dsn::utils::filesystem::last_write_time(current_file, tm)) {
- LOG_WARNING("get last write time of file %s failed", current_file.c_str());
+ LOG_WARNING_F("get last write time of file {} failed", current_file);
break;
}
auto last_write_time = (uint64_t)tm;
@@ -327,7 +327,7 @@ void pegasus_server_impl::on_get(get_rpc rpc)
LOG_WARNING_PREFIX("rocksdb abnormal get from {}: "
"hash_key = {}, sort_key = {}, return = {}, "
"value_size = {}, time_used = {} ns",
- rpc.remote_address().to_string(),
+ rpc.remote_address(),
::pegasus::utils::c_escape_string(hash_key),
::pegasus::utils::c_escape_string(sort_key),
status.ToString(),
@@ -438,24 +438,23 @@ void pegasus_server_impl::on_multi_get(multi_get_rpc rpc)
if (c > 0 || (c == 0 && (!start_inclusive || !stop_inclusive))) {
// empty sort key range
if (_verbose_log) {
- LOG_WARNING(
- "%s: empty sort key range for multi_get from %s: hash_key
= \"%s\", "
- "start_sort_key = \"%s\" (%s), stop_sort_key = \"%s\"
(%s), "
- "sort_key_filter_type = %s, sort_key_filter_pattern =
\"%s\", "
- "final_start = \"%s\" (%s), final_stop = \"%s\" (%s)",
- replica_name(),
- rpc.remote_address().to_string(),
-
::pegasus::utils::c_escape_string(request.hash_key).c_str(),
-
::pegasus::utils::c_escape_string(request.start_sortkey).c_str(),
+ LOG_WARNING_PREFIX(
+ "empty sort key range for multi_get from {}: hash_key =
\"{}\", start_sort_key "
+ "= \"{}\" ({}), stop_sort_key = \"{}\" ({}),
sort_key_filter_type = {}, "
+ "sort_key_filter_pattern = \"{}\", final_start = \"{}\"
({}), final_stop = "
+ "\"{}\" ({})",
+ rpc.remote_address(),
+ ::pegasus::utils::c_escape_string(request.hash_key),
+ ::pegasus::utils::c_escape_string(request.start_sortkey),
request.start_inclusive ? "inclusive" : "exclusive",
-
::pegasus::utils::c_escape_string(request.stop_sortkey).c_str(),
+ ::pegasus::utils::c_escape_string(request.stop_sortkey),
request.stop_inclusive ? "inclusive" : "exclusive",
::dsn::apps::_filter_type_VALUES_TO_NAMES.find(request.sort_key_filter_type)
->second,
-
::pegasus::utils::c_escape_string(request.sort_key_filter_pattern).c_str(),
- ::pegasus::utils::c_escape_string(start).c_str(),
+
::pegasus::utils::c_escape_string(request.sort_key_filter_pattern),
+ ::pegasus::utils::c_escape_string(start),
start_inclusive ? "inclusive" : "exclusive",
- ::pegasus::utils::c_escape_string(stop).c_str(),
+ ::pegasus::utils::c_escape_string(stop),
stop_inclusive ? "inclusive" : "exclusive");
}
resp.error = rocksdb::Status::kOk;
@@ -641,7 +640,7 @@ void pegasus_server_impl::on_multi_get(multi_get_rpc rpc)
if (limiter->exceed_limit()) {
LOG_WARNING_PREFIX(
"rocksdb abnormal scan from {}: time_used({}ns) VS
time_threshold({}ns)",
- rpc.remote_address().to_string(),
+ rpc.remote_address(),
limiter->duration_time(),
limiter->max_duration_time());
}
@@ -744,7 +743,7 @@ void pegasus_server_impl::on_multi_get(multi_get_rpc rpc)
"max_kv_count = {}, max_kv_size = {}, reverse = {}, "
"result_count = {}, result_size = {}, iteration_count = {}, "
"expire_count = {}, filter_count = {}, time_used = {} ns",
- rpc.remote_address().to_string(),
+ rpc.remote_address(),
::pegasus::utils::c_escape_string(request.hash_key),
::pegasus::utils::c_escape_string(request.start_sortkey),
request.start_inclusive ? "inclusive" : "exclusive",
@@ -881,7 +880,7 @@ void pegasus_server_impl::on_batch_get(batch_get_rpc rpc)
LOG_WARNING_PREFIX(
"rocksdb abnormal batch_get from {}: total data size = {}, row
count = {}, "
"time_used = {} us",
- rpc.remote_address().to_string(),
+ rpc.remote_address(),
total_data_size,
request.keys.size(),
time_used / 1000);
@@ -966,7 +965,7 @@ void pegasus_server_impl::on_sortkey_count(sortkey_count_rpc rpc)
resp.count = 0;
} else if (limiter->exceed_limit()) {
LOG_WARNING_PREFIX("rocksdb abnormal scan from {}: time_used({}ns) VS time_threshold({}ns)",
- rpc.remote_address().to_string(),
+ rpc.remote_address(),
limiter->duration_time(),
limiter->max_duration_time());
resp.count = -1;
@@ -1129,14 +1128,13 @@ void pegasus_server_impl::on_get_scanner(get_scanner_rpc rpc)
if (c > 0 || (c == 0 && (!start_inclusive || !stop_inclusive))) {
// empty key range
if (_verbose_log) {
- LOG_WARNING("%s: empty key range for get_scanner from %s: "
- "start_key = \"%s\" (%s), stop_key = \"%s\" (%s)",
- replica_name(),
- rpc.remote_address().to_string(),
- ::pegasus::utils::c_escape_string(request.start_key).c_str(),
- request.start_inclusive ? "inclusive" : "exclusive",
- ::pegasus::utils::c_escape_string(request.stop_key).c_str(),
- request.stop_inclusive ? "inclusive" : "exclusive");
+ LOG_WARNING_PREFIX("empty key range for get_scanner from {}: start_key = \"{}\" ({}), "
+ "stop_key = \"{}\" ({})",
+ rpc.remote_address(),
+ ::pegasus::utils::c_escape_string(request.start_key),
+ request.start_inclusive ? "inclusive" : "exclusive",
+ ::pegasus::utils::c_escape_string(request.stop_key),
+ request.stop_inclusive ? "inclusive" : "exclusive");
}
resp.error = rocksdb::Status::kOk;
_cu_calculator->add_scan_cu(req, resp.error, resp.kvs);
@@ -1260,7 +1258,7 @@ void pegasus_server_impl::on_get_scanner(get_scanner_rpc rpc)
resp.error = rocksdb::Status::kIncomplete;
LOG_WARNING_PREFIX("rocksdb abnormal scan from {}: batch_count={}, time_used_ns({}) VS "
"time_threshold_ns({})",
- rpc.remote_address().to_string(),
+ rpc.remote_address(),
batch_count,
limiter->duration_time(),
limiter->max_duration_time());
@@ -1432,7 +1430,7 @@ void pegasus_server_impl::on_scan(scan_rpc rpc)
resp.error = rocksdb::Status::kIncomplete;
LOG_WARNING_PREFIX("rocksdb abnormal scan from {}: batch_count={},
time_used({}ns) VS "
"time_threshold({}ns)",
- rpc.remote_address().to_string(),
+ rpc.remote_address(),
batch_count,
limiter->duration_time(),
limiter->max_duration_time());
diff --git a/src/server/result_writer.cpp b/src/server/result_writer.cpp
index b2efd12b3..a88ce284f 100644
--- a/src/server/result_writer.cpp
+++ b/src/server/result_writer.cpp
@@ -41,13 +41,13 @@ void result_writer::set_result(const std::string &hash_key,
if (err != PERR_OK) {
int new_try_count = try_count - 1;
if (new_try_count > 0) {
- LOG_WARNING("set_result fail, hash_key = %s, sort_key = %s,
value = %s, "
- "error = %s, left_try_count = %d, try again after
1 minute",
- hash_key.c_str(),
- sort_key.c_str(),
- value.c_str(),
- _client->get_error_string(err),
- new_try_count);
+ LOG_WARNING_F("set_result fail, hash_key = {}, sort_key = {},
value = {}, "
+ "error = {}, left_try_count = {}, try again
after 1 minute",
+ hash_key,
+ sort_key,
+ value,
+ _client->get_error_string(err),
+ new_try_count);
::dsn::tasking::enqueue(
LPC_WRITE_RESULT,
&_tracker,
diff --git a/src/shell/command_helper.h b/src/shell/command_helper.h
index 030cc4a04..afbba849f 100644
--- a/src/shell/command_helper.h
+++ b/src/shell/command_helper.h
@@ -1265,8 +1265,7 @@ inline bool get_capacity_unit_stat(shell_context *sc,
dsn::rpc_address node_addr = nodes[i].address;
dsn::perf_counter_info info;
if (!decode_node_perf_counter_info(node_addr, results[i], info)) {
- LOG_WARNING("decode perf counter from node(%s) failed, just ignore
it",
- node_addr.to_string());
+ LOG_WARNING_F("decode perf counter from node({}) failed, just
ignore it", node_addr);
continue;
}
nodes_stat[i].timestamp = info.timestamp_str;
@@ -1333,8 +1332,7 @@ inline bool get_storage_size_stat(shell_context *sc, app_storage_size_stat &st_s
dsn::rpc_address node_addr = nodes[i].address;
dsn::perf_counter_info info;
if (!decode_node_perf_counter_info(node_addr, results[i], info)) {
- LOG_WARNING("decode perf counter from node(%s) failed, just ignore it",
- node_addr.to_string());
+ LOG_WARNING_F("decode perf counter from node({}) failed, just ignore it", node_addr);
continue;
}
for (dsn::perf_counter_metric &m : info.counters) {
diff --git a/src/test/pressure_test/main.cpp b/src/test/pressure_test/main.cpp
index 48bcc4e5b..2119b812d 100644
--- a/src/test/pressure_test/main.cpp
+++ b/src/test/pressure_test/main.cpp
@@ -154,11 +154,9 @@ void test_get(int32_t qps)
val);
} else if (ec == PERR_NOT_FOUND) {
// don't output info
- // LOG_WARNING("hashkey(%s) - sortkey(%s) doesn't
exist in the server",
- // hashkey.c_str(), sortkey.c_str());
} else if (ec == PERR_TIMEOUT) {
- LOG_WARNING("access server failed with err(%s)",
- pg_client->get_error_string(ec));
+ LOG_WARNING_F("access server failed with err({})",
+ pg_client->get_error_string(ec));
}
});
cnt -= 1;
diff --git a/src/utils/api_utilities.h b/src/utils/api_utilities.h
index e190f990a..eca751d27 100644
--- a/src/utils/api_utilities.h
+++ b/src/utils/api_utilities.h
@@ -87,7 +87,6 @@ extern void dsn_coredump();
dsn_logf(__FILENAME__, __FUNCTION__, __LINE__, level, __VA_ARGS__); \
} while (false)
-#define LOG_WARNING(...) dlog(LOG_LEVEL_WARNING, __VA_ARGS__)
#define LOG_ERROR(...) dlog(LOG_LEVEL_ERROR, __VA_ARGS__)
#define dreturn_not_ok_logged(err, ...) \
diff --git a/src/utils/filesystem.cpp b/src/utils/filesystem.cpp
index 6544750a6..d1e005d39 100644
--- a/src/utils/filesystem.cpp
+++ b/src/utils/filesystem.cpp
@@ -335,7 +335,7 @@ static bool remove_directory(const std::string &npath)
boost::filesystem::remove_all(npath, ec);
// TODO(wutao1): return the specific error to caller
if (dsn_unlikely(bool(ec))) {
- LOG_WARNING("remove %s failed, err = %s", npath.c_str(),
ec.message().c_str());
+ LOG_WARNING_F("remove {} failed, err = {}", npath, ec.message());
return false;
}
return true;
@@ -358,8 +358,7 @@ bool remove_path(const std::string &path)
if (dsn::utils::filesystem::path_exists_internal(npath, FTW_F)) {
bool ret = (::remove(npath.c_str()) == 0);
if (!ret) {
- LOG_WARNING(
- "remove file %s failed, err = %s", path.c_str(),
safe_strerror(errno).c_str());
+ LOG_WARNING_F("remove file {} failed, err = {}", path,
safe_strerror(errno));
}
return ret;
} else if (dsn::utils::filesystem::path_exists_internal(npath, FTW_D)) {
@@ -375,10 +374,8 @@ bool rename_path(const std::string &path1, const std::string &path2)
ret = (::rename(path1.c_str(), path2.c_str()) == 0);
if (!ret) {
- LOG_WARNING("rename from '%s' to '%s' failed, err = %s",
- path1.c_str(),
- path2.c_str(),
- safe_strerror(errno).c_str());
+ LOG_WARNING_F(
+ "rename from '{}' to '{}' failed, err = {}", path1, path2, safe_strerror(errno));
}
return ret;
@@ -483,10 +480,10 @@ bool create_directory(const std::string &path)
return true;
out_error:
- LOG_WARNING("create_directory %s failed due to cannot create the
component: %s, err = %s",
- path.c_str(),
- cpath.c_str(),
- safe_strerror(err).c_str());
+ LOG_WARNING_F("create_directory {} failed due to cannot create the
component: {}, err = {}",
+ path,
+ cpath,
+ safe_strerror(err));
return false;
}
@@ -531,12 +528,12 @@ bool create_file(const std::string &path)
fd = ::creat(npath.c_str(), mode);
if (fd == -1) {
err = errno;
- LOG_WARNING("create_file %s failed, err = %s", path.c_str(),
safe_strerror(err).c_str());
+ LOG_WARNING_F("create_file {} failed, err = {}", path,
safe_strerror(err));
return false;
}
if (::close_(fd) != 0) {
- LOG_WARNING("create_file %s, failed to close the file handle.",
path.c_str());
+ LOG_WARNING_F("create_file {}, failed to close the file handle.",
path);
}
return true;
diff --git a/src/utils/utils.cpp b/src/utils/utils.cpp
index f6262fa8c..77a503f5b 100644
--- a/src/utils/utils.cpp
+++ b/src/utils/utils.cpp
@@ -96,9 +96,9 @@ bool hostname_from_ip(uint32_t ip, std::string *hostname_result)
char ip_str[256];
inet_ntop(AF_INET, &net_addr, ip_str, sizeof(ip_str));
if (err == EAI_SYSTEM) {
- LOG_WARNING("got error %s when try to resolve %s", strerror(errno), ip_str);
+ LOG_WARNING_F("got error {} when try to resolve {}", strerror(errno), ip_str);
} else {
- LOG_WARNING("return error(%s) when try to resolve %s", gai_strerror(err), ip_str);
+ LOG_WARNING_F("return error({}) when try to resolve {}", gai_strerror(err), ip_str);
}
return false;
} else {
@@ -126,7 +126,7 @@ bool hostname_from_ip_port(const char *ip_port, std::string *hostname_result)
{
dsn::rpc_address addr;
if (!addr.from_string_ipv4(ip_port)) {
- LOG_WARNING("invalid ip_port(%s)", ip_port);
+ LOG_WARNING_F("invalid ip_port({})", ip_port);
*hostname_result = ip_port;
return false;
}
@@ -155,7 +155,7 @@ bool list_hostname_from_ip(const char *ip_list, std::string *hostname_result_lis
dsn::utils::split_args(ip_list, splitted_ip, ',');
if (splitted_ip.empty()) {
- LOG_WARNING("invalid ip_list(%s)", ip_list);
+ LOG_WARNING_F("invalid ip_list({})", ip_list);
*hostname_result_list = *ip_list;
return false;
}
@@ -182,7 +182,7 @@ bool list_hostname_from_ip_port(const char *ip_port_list, std::string *hostname_
dsn::utils::split_args(ip_port_list, splitted_ip_port, ',');
if (splitted_ip_port.empty()) {
- LOG_WARNING("invalid ip_list(%s)", ip_port_list);
+ LOG_WARNING_F("invalid ip_list({})", ip_port_list);
*hostname_result_list = *ip_port_list;
return false;
}
diff --git a/src/zookeeper/distributed_lock_service_zookeeper.cpp b/src/zookeeper/distributed_lock_service_zookeeper.cpp
index 268aa9051..c81cb1fa3 100644
--- a/src/zookeeper/distributed_lock_service_zookeeper.cpp
+++ b/src/zookeeper/distributed_lock_service_zookeeper.cpp
@@ -104,7 +104,7 @@ error_code distributed_lock_service_zookeeper::initialize(const std::vector<std:
if (_zoo_state != ZOO_CONNECTED_STATE) {
_waiting_attach.wait_for(zookeeper_session_mgr::instance().timeout());
if (_zoo_state != ZOO_CONNECTED_STATE) {
- LOG_WARNING(
+ LOG_WARNING_F(
"attach to zookeeper session timeout, distributed lock service
initialized failed");
return ERR_TIMEOUT;
}
@@ -282,7 +282,8 @@ void distributed_lock_service_zookeeper::on_zoo_session_evt(lock_srv_ptr _this,
zookeeper_session::string_zoo_state(zoo_state));
_this->dispatch_zookeeper_session_expire();
} else {
- LOG_WARNING("get zoo state: %s, ignore it", zookeeper_session::string_zoo_state(zoo_state));
+ LOG_WARNING_F("get zoo state: {}, ignore it",
+ zookeeper_session::string_zoo_state(zoo_state));
}
}
}
diff --git a/src/zookeeper/lock_struct.cpp b/src/zookeeper/lock_struct.cpp
index bc05acf54..fbd96cd2a 100644
--- a/src/zookeeper/lock_struct.cpp
+++ b/src/zookeeper/lock_struct.cpp
@@ -79,9 +79,9 @@ static bool is_zookeeper_timeout(int zookeeper_error)
#define __execute(cb, _this) tasking::enqueue(TASK_CODE_DLOCK, nullptr, cb, _this->hash())
#define __add_ref_and_delay_call(op, _this) \
- LOG_WARNING("operation %s on %s encounter error, retry later", \
- zookeeper_session::string_zoo_operation(op->_optype), \
- op->_input._path.c_str()); \
+ LOG_WARNING_F("operation {} on {} encounter error, retry later", \
+ zookeeper_session::string_zoo_operation(op->_optype), \
+ op->_input._path); \
zookeeper_session::add_ref(op); \
tasking::enqueue(TASK_CODE_DLOCK, \
nullptr, \
@@ -167,7 +167,7 @@ int64_t lock_struct::parse_seq_path(const std::string &path)
for (; i >= 0 && j >= 0 && path[i] == match[j]; --i, --j)
;
if (power == 1 || j >= 0) {
- LOG_WARNING("invalid path: %s", path.c_str());
+ LOG_WARNING_F("invalid path: {}", path);
return -1;
}
return ans;
@@ -196,8 +196,8 @@ void lock_struct::owner_change(lock_struct_ptr _this, int zoo_event)
__check_code(_this->_state, allow_state, 3, string_state(_this->_state));
if (_this->_state == lock_state::uninitialized) {
- LOG_WARNING("this is mainly due to a timeout happens before, just ignore the event %s",
- zookeeper_session::string_zoo_event(zoo_event));
+ LOG_WARNING_F("this is mainly due to a timeout happens before, just ignore the event {}",
+ zookeeper_session::string_zoo_event(zoo_event));
return;
}
if (_this->_state == lock_state::cancelled || _this->_state == lock_state::expired) {
@@ -257,11 +257,11 @@ void lock_struct::after_remove_duplicated_locknode(lock_struct_ptr _this,
void lock_struct::remove_duplicated_locknode(std::string &&znode_path)
{
lock_struct_ptr _this = this;
- LOG_WARNING(
- "duplicated value(%s) ephe/seq node(%s and %s) create on zookeeper, remove the smaller one",
- _myself._node_value.c_str(),
- _owner._node_seq_name.c_str(),
- _myself._node_seq_name.c_str());
+ LOG_WARNING_F(
+ "duplicated value({}) ephe/seq node({} and {}) create on zookeeper, remove the smaller one",
+ _myself._node_value,
+ _owner._node_seq_name,
+ _myself._node_seq_name);
auto delete_callback_wrapper = [_this](zookeeper_session::zoo_opcontext *op) {
if (is_zookeeper_timeout(op->_output.error)) {
@@ -458,9 +458,7 @@ void lock_struct::after_get_lockdir_nodes(lock_struct_ptr _this,
std::string &child = (*children)[i];
int64_t seq = parse_seq_path(child);
if (seq == -1) {
- LOG_WARNING("an invalid node(%s) in lockdir(%s), ignore",
- child.c_str(),
- _this->_lock_dir.c_str());
+ LOG_WARNING_F("an invalid node({}) in lockdir({}), ignore", child, _this->_lock_dir);
continue;
}
if (min_pos == -1 || min_seq > seq)
@@ -472,11 +470,10 @@ void lock_struct::after_get_lockdir_nodes(lock_struct_ptr _this,
LOG_INFO_F("min sequece number({}) in lockdir({})", min_seq, _this->_lock_dir);
if (my_pos == -1) {
// znode removed on zookeeper, may timeout or removed by other procedure
- LOG_WARNING(
- "sequence and ephemeral node(%s/%s) removed when get_children, this is abnormal, "
- "try to reaquire the lock",
- _this->_lock_dir.c_str(),
- _this->_myself._node_seq_name.c_str());
+ LOG_WARNING_F("sequence and ephemeral node({}/{}) removed when get_children, this is "
+ "abnormal, try to reaquire the lock",
+ _this->_lock_dir,
+ _this->_myself._node_seq_name);
_this->_myself._node_seq_name.clear();
_this->_myself._sequence_id = -1;
_this->create_locknode();