This is an automated email from the ASF dual-hosted git repository.

wangdan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-pegasus.git


The following commit(s) were added to refs/heads/master by this push:
     new c20a88d0e refactor(macro): use CHECK to replace dassert_f/dassert 
(#1205)
c20a88d0e is described below

commit c20a88d0ed110695e45fbd70ceef64a415034015
Author: Yingchun Lai <[email protected]>
AuthorDate: Thu Oct 27 16:42:03 2022 +0800

    refactor(macro): use CHECK to replace dassert_f/dassert (#1205)
---
 src/base/pegasus_utils.cpp                     |  4 +--
 src/base/value_schema_manager.cpp              |  4 +--
 src/base/value_schema_v0.cpp                   |  6 ++---
 src/base/value_schema_v1.cpp                   |  6 ++---
 src/base/value_schema_v2.cpp                   |  6 ++---
 src/block_service/fds/fds_service.cpp          | 22 ++++++++--------
 src/client_lib/pegasus_client_factory_impl.cpp |  2 +-
 src/client_lib/pegasus_scanner_impl.cpp        | 12 ++++-----
 src/common/replication_common.cpp              | 22 +++++++---------
 src/geo/bench/bench.cpp                        |  2 +-
 src/geo/lib/geo_client.cpp                     | 10 +++----
 src/geo/test/geo_test.cpp                      |  2 +-
 src/meta/backup_engine.cpp                     |  5 ++--
 src/meta/meta_server_failure_detector.cpp      |  4 +--
 src/meta/meta_split_service.cpp                |  8 +++---
 src/meta/meta_state_service_utils_impl.h       | 16 ++++++------
 src/meta/test/meta_app_operation_test.cpp      |  6 ++---
 src/redis_protocol/proxy_lib/proxy_layer.cpp   |  3 ++-
 src/redis_protocol/proxy_lib/redis_parser.cpp  | 29 ++++++++++-----------
 src/replica/duplication/replica_follower.cpp   |  4 +--
 src/replica/mutation_log.cpp                   |  3 ++-
 src/replica/replication_app_base.cpp           |  9 +++----
 src/reporter/pegasus_counter_reporter.cpp      |  2 +-
 src/runtime/rpc/thrift_message_parser.cpp      |  2 +-
 src/runtime/security/kinit_context.cpp         |  2 +-
 src/runtime/security/sasl_init.cpp             |  2 +-
 src/runtime/service_engine.cpp                 | 12 ++++-----
 src/server/available_detector.cpp              |  6 ++---
 src/server/capacity_unit_calculator.cpp        | 16 ++++++------
 src/server/hotkey_collector.cpp                |  2 +-
 src/server/info_collector.cpp                  |  8 +++---
 src/server/meta_store.cpp                      |  8 +++---
 src/server/pegasus_server_impl.cpp             | 30 ++++++++++-----------
 src/server/pegasus_server_impl_init.cpp        | 16 ++++++------
 src/server/pegasus_server_write.cpp            |  2 +-
 src/server/pegasus_write_service.cpp           |  2 +-
 src/server/pegasus_write_service_impl.h        |  2 +-
 src/shell/command_helper.h                     |  4 +--
 src/shell/commands/debugger.cpp                |  2 +-
 src/shell/commands/table_management.cpp        |  2 +-
 src/shell/main.cpp                             |  2 +-
 src/test/bench_test/benchmark.cpp              |  4 +--
 src/test/function_test/utils/global_env.cpp    |  4 +--
 src/test/kill_test/process_kill_testor.cpp     |  6 ++---
 src/test/pressure_test/main.cpp                | 36 ++++++++++++--------------
 src/utils/flags.cpp                            |  4 +--
 src/utils/fmt_logging.h                        |  3 +++
 src/utils/metrics.cpp                          |  6 ++---
 src/utils/nth_element.h                        |  6 ++---
 src/utils/test/metrics_test.cpp                | 10 +++----
 src/utils/test/nth_element_utils.h             |  9 +++----
 51 files changed, 192 insertions(+), 203 deletions(-)

diff --git a/src/base/pegasus_utils.cpp b/src/base/pegasus_utils.cpp
index 6dad198a6..da28c69e2 100644
--- a/src/base/pegasus_utils.cpp
+++ b/src/base/pegasus_utils.cpp
@@ -115,8 +115,8 @@ c_escape_string(const char *src, size_t src_len, char 
*dest, size_t dest_len, bo
 inline unsigned int hex_digit_to_int(char c)
 {
     /* Assume ASCII. */
-    dassert_f('0' == 0x30 && 'A' == 0x41 && 'a' == 0x61, "");
-    dassert_f(isxdigit(c), "");
+    CHECK('0' == 0x30 && 'A' == 0x41 && 'a' == 0x61, "");
+    CHECK(isxdigit(c), "");
     unsigned int x = static_cast<unsigned char>(c);
     if (x > '9') {
         x += 9;
diff --git a/src/base/value_schema_manager.cpp 
b/src/base/value_schema_manager.cpp
index 23d53cdf3..e532527fc 100644
--- a/src/base/value_schema_manager.cpp
+++ b/src/base/value_schema_manager.cpp
@@ -56,9 +56,7 @@ value_schema *value_schema_manager::get_value_schema(uint32_t 
meta_cf_data_versi
         return schema;
     } else {
         auto schema = get_value_schema(meta_cf_data_version);
-        if (nullptr == schema) {
-            dassert_f(false, "data version({}) in meta cf is not supported", 
meta_cf_data_version);
-        }
+        CHECK_NOTNULL(schema, "data version({}) in meta cf is not supported", 
meta_cf_data_version);
         return schema;
     }
 }
diff --git a/src/base/value_schema_v0.cpp b/src/base/value_schema_v0.cpp
index faba83895..a86f8f3ba 100644
--- a/src/base/value_schema_v0.cpp
+++ b/src/base/value_schema_v0.cpp
@@ -32,7 +32,7 @@ std::unique_ptr<value_field> 
value_schema_v0::extract_field(dsn::string_view val
         field = extract_timestamp(value);
         break;
     default:
-        dassert_f(false, "Unsupported field type: {}", type);
+        CHECK(false, "Unsupported field type: {}", type);
     }
     return field;
 }
@@ -51,7 +51,7 @@ void value_schema_v0::update_field(std::string &value, 
std::unique_ptr<value_fie
         update_expire_ts(value, std::move(field));
         break;
     default:
-        dassert_f(false, "Unsupported update field type: {}", type);
+        CHECK(false, "Unsupported update field type: {}", type);
     }
 }
 
@@ -62,7 +62,7 @@ rocksdb::SliceParts value_schema_v0::generate_value(const 
value_params &params)
     auto data_field =
         static_cast<user_data_field 
*>(params.fields[value_field_type::USER_DATA].get());
     if (dsn_unlikely(expire_ts_field == nullptr || data_field == nullptr)) {
-        dassert_f(false, "USER_DATA or EXPIRE_TIMESTAMP is not provided");
+        CHECK(false, "USER_DATA or EXPIRE_TIMESTAMP is not provided");
         return {nullptr, 0};
     }
 
diff --git a/src/base/value_schema_v1.cpp b/src/base/value_schema_v1.cpp
index aaf59f41f..7d28f18da 100644
--- a/src/base/value_schema_v1.cpp
+++ b/src/base/value_schema_v1.cpp
@@ -37,7 +37,7 @@ std::unique_ptr<value_field> 
value_schema_v1::extract_field(dsn::string_view val
         field = extract_time_tag(value);
         break;
     default:
-        dassert_f(false, "Unsupported field type: {}", type);
+        CHECK(false, "Unsupported field type: {}", type);
     }
     return field;
 }
@@ -56,7 +56,7 @@ void value_schema_v1::update_field(std::string &value, 
std::unique_ptr<value_fie
         update_expire_ts(value, std::move(field));
         break;
     default:
-        dassert_f(false, "Unsupported update field type: {}", type);
+        CHECK(false, "Unsupported update field type: {}", type);
     }
 }
 
@@ -70,7 +70,7 @@ rocksdb::SliceParts value_schema_v1::generate_value(const 
value_params &params)
         static_cast<user_data_field 
*>(params.fields[value_field_type::USER_DATA].get());
     if (dsn_unlikely(expire_ts_field == nullptr || data_field == nullptr ||
                      timetag_field == nullptr)) {
-        dassert_f(false, "USER_DATA or EXPIRE_TIMESTAMP or TIME_TAG is not 
provided");
+        CHECK(false, "USER_DATA or EXPIRE_TIMESTAMP or TIME_TAG is not 
provided");
         return {nullptr, 0};
     }
 
diff --git a/src/base/value_schema_v2.cpp b/src/base/value_schema_v2.cpp
index eafd5c55c..46fb2ca54 100644
--- a/src/base/value_schema_v2.cpp
+++ b/src/base/value_schema_v2.cpp
@@ -38,7 +38,7 @@ std::unique_ptr<value_field> 
value_schema_v2::extract_field(dsn::string_view val
         field = extract_time_tag(value);
         break;
     default:
-        dassert_f(false, "Unsupported field type: {}", type);
+        CHECK(false, "Unsupported field type: {}", type);
     }
     return field;
 }
@@ -58,7 +58,7 @@ void value_schema_v2::update_field(std::string &value, 
std::unique_ptr<value_fie
         update_expire_ts(value, std::move(field));
         break;
     default:
-        dassert_f(false, "Unsupported update field type: {}", type);
+        CHECK(false, "Unsupported update field type: {}", type);
     }
 }
 
@@ -72,7 +72,7 @@ rocksdb::SliceParts value_schema_v2::generate_value(const 
value_params &params)
         static_cast<user_data_field 
*>(params.fields[value_field_type::USER_DATA].get());
     if (dsn_unlikely(expire_ts_field == nullptr || data_field == nullptr ||
                      timetag_field == nullptr)) {
-        dassert_f(false, "USER_DATA or EXPIRE_TIMESTAMP or TIME_TAG is not 
provided");
+        CHECK(false, "USER_DATA or EXPIRE_TIMESTAMP or TIME_TAG is not 
provided");
         return {nullptr, 0};
     }
 
diff --git a/src/block_service/fds/fds_service.cpp 
b/src/block_service/fds/fds_service.cpp
index 67064c331..6155b2c35 100644
--- a/src/block_service/fds/fds_service.cpp
+++ b/src/block_service/fds/fds_service.cpp
@@ -206,19 +206,19 @@ dsn::task_ptr fds_service::list_dir(const ls_request &req,
                 // fds listing's objects are with full-path, we must extract 
the postfix to emulate
                 // the filesystem structure
                 for (const galaxy::fds::FDSObjectSummary &obj : objs) {
-                    dassert(fds_path.empty() || 
boost::starts_with(obj.objectName(), fds_path),
-                            "invalid path(%s) in parent(%s)",
-                            obj.objectName().c_str(),
-                            fds_path.c_str());
+                    CHECK(fds_path.empty() || 
boost::starts_with(obj.objectName(), fds_path),
+                          "invalid path({}) in parent({})",
+                          obj.objectName(),
+                          fds_path);
                     resp.entries->push_back(
                         
{utils::path_from_fds(obj.objectName().substr(fds_path.size()), false),
                          false});
                 }
                 for (const std::string &s : common_prefix) {
-                    dassert(fds_path.empty() || boost::starts_with(s, 
fds_path),
-                            "invalid path(%s) in parent(%s)",
-                            s.c_str(),
-                            fds_path.c_str());
+                    CHECK(fds_path.empty() || boost::starts_with(s, fds_path),
+                          "invalid path({}) in parent({})",
+                          s,
+                          fds_path);
                     resp.entries->push_back(
                         {utils::path_from_fds(s.substr(fds_path.size()), 
true), true});
                 }
@@ -412,14 +412,14 @@ error_code fds_file_object::get_file_meta()
                   fds_service::FILE_LENGTH_CUSTOM_KEY.c_str(),
                   _fds_path.c_str());
         bool valid = dsn::buf2uint64(iter->second, _size);
-        dassert_f(valid, "error to get file size");
+        CHECK(valid, "error to get file size");
 
         // get md5 key
         iter = meta.find(fds_service::FILE_MD5_KEY);
         dassert_f(iter != meta.end(),
                   "can't find {} in object({})'s metadata",
-                  fds_service::FILE_MD5_KEY.c_str(),
-                  _fds_path.c_str());
+                  fds_service::FILE_MD5_KEY,
+                  _fds_path);
         _md5sum = iter->second;
 
         _has_meta_synced = true;
diff --git a/src/client_lib/pegasus_client_factory_impl.cpp 
b/src/client_lib/pegasus_client_factory_impl.cpp
index 2cbac1e79..b4f6b4a1c 100644
--- a/src/client_lib/pegasus_client_factory_impl.cpp
+++ b/src/client_lib/pegasus_client_factory_impl.cpp
@@ -30,7 +30,7 @@ bool pegasus_client_factory_impl::initialize(const char 
*config_file)
 {
     bool is_initialized = ::dsn::tools::is_engine_ready();
     if (config_file == nullptr) {
-        dassert(is_initialized, "rdsn engine not started, please specify a 
valid config file");
+        CHECK(is_initialized, "rdsn engine not started, please specify a valid 
config file");
     } else {
         if (is_initialized) {
             LOG_WARNING("rdsn engine already started, ignore the config file 
'%s'", config_file);
diff --git a/src/client_lib/pegasus_scanner_impl.cpp 
b/src/client_lib/pegasus_scanner_impl.cpp
index 7b10e6e94..b10c56e8a 100644
--- a/src/client_lib/pegasus_scanner_impl.cpp
+++ b/src/client_lib/pegasus_scanner_impl.cpp
@@ -142,7 +142,7 @@ 
pegasus_client_impl::pegasus_scanner_impl::get_smart_wrapper()
 void pegasus_client_impl::pegasus_scanner_impl::_async_next_internal()
 {
     // _lock will be locked out of the while block
-    dassert(!_queue.empty(), "queue should not be empty when 
_async_next_internal start");
+    CHECK(!_queue.empty(), "queue should not be empty when 
_async_next_internal start");
 
     std::list<async_scan_next_callback_t> temp;
     while (true) {
@@ -232,7 +232,7 @@ void 
pegasus_client_impl::pegasus_scanner_impl::_next_batch()
     ::dsn::apps::scan_request req;
     req.context_id = _context;
 
-    dassert(!_rpc_started, "");
+    CHECK(!_rpc_started, "");
     _rpc_started = true;
     _client->scan(req,
                   [this](::dsn::error_code err,
@@ -267,7 +267,7 @@ void 
pegasus_client_impl::pegasus_scanner_impl::_start_scan()
     req.__set_full_scan(_full_scan);
     req.__set_only_return_count(_options.only_return_count);
 
-    dassert(!_rpc_started, "");
+    CHECK(!_rpc_started, "");
     _rpc_started = true;
     _client->get_scanner(
         req,
@@ -282,7 +282,7 @@ void 
pegasus_client_impl::pegasus_scanner_impl::_on_scan_response(::dsn::error_c
                                                                   
dsn::message_ex *req,
                                                                   
dsn::message_ex *resp)
 {
-    dassert(_rpc_started, "");
+    CHECK(_rpc_started, "");
     _rpc_started = false;
     ::dsn::apps::scan_response response;
     if (err == ERR_OK) {
@@ -347,8 +347,8 @@ 
pegasus_client_impl::pegasus_scanner_impl::~pegasus_scanner_impl()
 {
     dsn::zauto_lock l(_lock);
 
-    dassert(!_rpc_started, "all scan-rpc should be completed here");
-    dassert(_queue.empty(), "queue should be empty");
+    CHECK(!_rpc_started, "all scan-rpc should be completed here");
+    CHECK(_queue.empty(), "queue should be empty");
 
     if (_client) {
         if (_context >= SCAN_CONTEXT_ID_VALID_MIN)
diff --git a/src/common/replication_common.cpp 
b/src/common/replication_common.cpp
index 51bf51b41..0eb321154 100644
--- a/src/common/replication_common.cpp
+++ b/src/common/replication_common.cpp
@@ -141,7 +141,7 @@ void replication_options::initialize()
     std::string error_msg = "";
     bool flag = get_data_dir_and_tag(
         dirs_str, app_dir, app_name, config_data_dirs, config_data_dir_tags, 
error_msg);
-    dassert_f(flag, error_msg);
+    CHECK(flag, error_msg);
 
     // check if data_dir in black list, data_dirs doesn't contain dir in black 
list
     std::string black_list_file =
@@ -159,9 +159,7 @@ void replication_options::initialize()
         data_dir_tags.emplace_back(config_data_dir_tags[i]);
     }
 
-    if (data_dirs.empty()) {
-        dassert_f(false, "no replica data dir found, maybe not set or excluded 
by black list");
-    }
+    CHECK(!data_dirs.empty(), "no replica data dir found, maybe not set or 
excluded by black list");
 
     deny_client_on_start = dsn_config_get_value_bool("replication",
                                                      "deny_client_on_start",
@@ -409,7 +407,7 @@ void replication_options::initialize()
 
     max_concurrent_bulk_load_downloading_count = 
FLAGS_max_concurrent_bulk_load_downloading_count;
 
-    dassert_f(replica_helper::load_meta_servers(meta_servers), "invalid meta 
server config");
+    CHECK(replica_helper::load_meta_servers(meta_servers), "invalid meta 
server config");
 
     sanity_check();
 }
@@ -486,11 +484,11 @@ bool replica_helper::load_meta_servers(/*out*/ 
std::vector<dsn::rpc_address> &se
                   section,
                   key);
         uint32_t port_num = 0;
-        dassert_f(dsn::internal::buf2unsigned(hostname_port[1], port_num) && 
port_num < UINT16_MAX,
-                  "invalid address '{}' specified in config [{}].{}",
-                  s.c_str(),
-                  section,
-                  key);
+        CHECK(dsn::internal::buf2unsigned(hostname_port[1], port_num) && 
port_num < UINT16_MAX,
+              "invalid address '{}' specified in config [{}].{}",
+              s,
+              section,
+              key);
         if (0 != (ip = 
::dsn::rpc_address::ipv4_from_host(hostname_port[0].c_str()))) {
             addr.assign_ipv4(ip, static_cast<uint16_t>(port_num));
         } else if (!addr.from_string_ipv4(s.c_str())) {
@@ -580,9 +578,7 @@ replication_options::get_data_dirs_in_black_list(const 
std::string &fname,
 
     LOG_INFO_F("data_dirs_black_list_file[{}] found, apply it", fname);
     std::ifstream file(fname);
-    if (!file) {
-        dassert_f(false, "open data_dirs_black_list_file failed: {}", fname);
-    }
+    CHECK(file, "open data_dirs_black_list_file failed: {}", fname);
 
     std::string str;
     int count = 0;
diff --git a/src/geo/bench/bench.cpp b/src/geo/bench/bench.cpp
index 17717e32c..2ee039ca8 100644
--- a/src/geo/bench/bench.cpp
+++ b/src/geo/bench/bench.cpp
@@ -86,7 +86,7 @@ int main(int argc, char **argv)
             std::string value;
             S2LatLng latlng(S2Testing::SamplePoint(rect));
             bool ok = codec.encode_to_value(latlng.lat().degrees(), 
latlng.lng().degrees(), value);
-            dassert_f(ok, "");
+            CHECK(ok, "");
             int ret = my_geo.set(std::to_string(i), "", value, 1000);
             if (ret != pegasus::PERR_OK) {
                 std::cerr << "set data failed. error=" << ret << std::endl;
diff --git a/src/geo/lib/geo_client.cpp b/src/geo/lib/geo_client.cpp
index bdb979721..8497f4c94 100644
--- a/src/geo/lib/geo_client.cpp
+++ b/src/geo/lib/geo_client.cpp
@@ -67,13 +67,13 @@ geo_client::geo_client(const char *config_file,
                        const char *geo_app_name)
 {
     bool ok = pegasus_client_factory::initialize(config_file);
-    dassert(ok, "init pegasus client factory failed");
+    CHECK(ok, "init pegasus client factory failed");
 
     _common_data_client = pegasus_client_factory::get_client(cluster_name, 
common_app_name);
-    dassert(_common_data_client != nullptr, "init pegasus _common_data_client 
failed");
+    CHECK_NOTNULL(_common_data_client, "init pegasus _common_data_client 
failed");
 
     _geo_data_client = pegasus_client_factory::get_client(cluster_name, 
geo_app_name);
-    dassert(_geo_data_client != nullptr, "init pegasus _geo_data_client 
failed");
+    CHECK_NOTNULL(_geo_data_client, "init pegasus _geo_data_client failed");
 
     _min_level = (int32_t)dsn_config_get_value_uint64(
         "geo_client.lib", "min_level", 12, "min cell level for scan");
@@ -93,7 +93,7 @@ geo_client::geo_client(const char *config_file,
         "geo_client.lib", "longitude_index", 4, "longitude index in value");
 
     dsn::error_s s = _codec.set_latlng_indices(latitude_index, 
longitude_index);
-    dassert_f(s.is_ok(), "set_latlng_indices({}, {}) failed", latitude_index, 
longitude_index);
+    CHECK(s.is_ok(), "set_latlng_indices({}, {}) failed", latitude_index, 
longitude_index);
 }
 
 dsn::error_s geo_client::set_max_level(int level)
@@ -649,7 +649,7 @@ void geo_client::async_get_result_from_cells(const 
S2CellUnion &cids,
                 }
             }
 
-            dassert(!start_stop_sort_keys.first.empty(), "");
+            CHECK(!start_stop_sort_keys.first.empty(), "");
             // the last sub slice of current `cid` on `_max_level` in Hilbert 
curve covered by `cap`
             if (start_stop_sort_keys.second.empty()) {
                 start_stop_sort_keys.second = gen_stop_sort_key(pre, hash_key);
diff --git a/src/geo/test/geo_test.cpp b/src/geo/test/geo_test.cpp
index f0c965650..27b948880 100644
--- a/src/geo/test/geo_test.cpp
+++ b/src/geo/test/geo_test.cpp
@@ -42,7 +42,7 @@ public:
         std::vector<dsn::rpc_address> meta_list;
         bool ok = dsn::replication::replica_helper::load_meta_servers(
             meta_list, PEGASUS_CLUSTER_SECTION_NAME.c_str(), "onebox");
-        dassert_f(ok, "load_meta_servers failed");
+        CHECK(ok, "load_meta_servers failed");
         auto ddl_client = new 
dsn::replication::replication_ddl_client(meta_list);
         dsn::error_code error = ddl_client->create_app("temp_geo", "pegasus", 
4, 3, {}, false);
         dcheck_eq(dsn::ERR_OK, error);
diff --git a/src/meta/backup_engine.cpp b/src/meta/backup_engine.cpp
index a66a48ae0..a205ba622 100644
--- a/src/meta/backup_engine.cpp
+++ b/src/meta/backup_engine.cpp
@@ -103,9 +103,8 @@ error_code backup_engine::write_backup_file(const 
std::string &file_name,
         LOG_INFO_F("create file {} failed", file_name);
         return err;
     }
-    dassert_f(remote_file != nullptr,
-              "create file {} succeed, but can't get handle",
-              create_file_req.file_name);
+    CHECK_NOTNULL(
+        remote_file, "create file {} succeed, but can't get handle", 
create_file_req.file_name);
     remote_file
         ->write(dist::block_service::write_request{write_buffer},
                 TASK_CODE_EXEC_INLINED,
diff --git a/src/meta/meta_server_failure_detector.cpp 
b/src/meta/meta_server_failure_detector.cpp
index 2b5ddd33c..72fbd97aa 100644
--- a/src/meta/meta_server_failure_detector.cpp
+++ b/src/meta/meta_server_failure_detector.cpp
@@ -91,14 +91,14 @@ bool meta_server_failure_detector::get_leader(rpc_address 
*leader)
         // get leader addr
         auto addr_part = str.substr(pos + 1, str.length() - pos - 1);
         if (!leader->from_string_ipv4(addr_part.data())) {
-            dassert_f(false, "parse {} to rpc_address failed", addr_part);
+            CHECK(false, "parse {} to rpc_address failed", addr_part);
         }
 
         // get the return value which implies whether the current node is 
primary or not
         bool is_leader = true;
         auto is_leader_part = str.substr(0, pos);
         if (!dsn::buf2bool(is_leader_part, is_leader)) {
-            dassert_f(false, "parse {} to bool failed", is_leader_part);
+            CHECK(false, "parse {} to bool failed", is_leader_part);
         }
         return is_leader;
     });
diff --git a/src/meta/meta_split_service.cpp b/src/meta/meta_split_service.cpp
index 0e90ff3fc..f42f2ec09 100644
--- a/src/meta/meta_split_service.cpp
+++ b/src/meta/meta_split_service.cpp
@@ -129,8 +129,8 @@ void 
meta_split_service::register_child_on_meta(register_child_rpc rpc)
 
     zauto_write_lock l(app_lock());
     std::shared_ptr<app_state> app = _state->get_app(app_name);
-    dassert_f(app != nullptr, "app({}) is not existed", app_name);
-    dassert_f(app->is_stateful, "app({}) is stateless currently", app_name);
+    CHECK(app, "app({}) is not existed", app_name);
+    CHECK(app->is_stateful, "app({}) is stateless currently", app_name);
 
     const gpid &parent_gpid = request.parent_config.pid;
     const gpid &child_gpid = request.child_config.pid;
@@ -245,8 +245,8 @@ void 
meta_split_service::on_add_child_on_remote_storage_reply(error_code ec,
 
     zauto_write_lock l(app_lock());
     std::shared_ptr<app_state> app = _state->get_app(app_name);
-    dassert_f(app != nullptr, "app({}) is not existed", app_name);
-    dassert_f(app->is_stateful, "app({}) is stateless currently", app_name);
+    CHECK(app, "app({}) is not existed", app_name);
+    CHECK(app->is_stateful, "app({}) is stateless currently", app_name);
 
     const gpid &parent_gpid = request.parent_config.pid;
     const gpid &child_gpid = request.child_config.pid;
diff --git a/src/meta/meta_state_service_utils_impl.h 
b/src/meta/meta_state_service_utils_impl.h
index 910bed66b..a5a9ec378 100644
--- a/src/meta/meta_state_service_utils_impl.h
+++ b/src/meta/meta_state_service_utils_impl.h
@@ -53,9 +53,9 @@ struct op_type
             "OP_GET_CHILDREN",
         };
 
-        dassert_f(v != OP_NONE && v <= (sizeof(op_type_to_string_map) / 
sizeof(char *)),
-                  "invalid type: {}",
-                  v);
+        CHECK(v != OP_NONE && v <= (sizeof(op_type_to_string_map) / 
sizeof(char *)),
+              "invalid type: {}",
+              v);
         return op_type_to_string_map[v - 1];
     }
 };
@@ -81,11 +81,11 @@ struct operation : pipeline::environment
             pipeline::repeat(std::move(*this_instance), 1_s);
             return;
         }
-        dassert_f(false,
-                  "request({}) on path({}) encountered an unexpected 
error({})",
-                  op_type::to_string(type),
-                  path,
-                  ec.to_string());
+        CHECK(false,
+              "request({}) on path({}) encountered an unexpected error({})",
+              op_type::to_string(type),
+              path,
+              ec.to_string());
     }
 
     dist::meta_state_service *remote_storage() const { return _ms->_remote; }
diff --git a/src/meta/test/meta_app_operation_test.cpp 
b/src/meta/test/meta_app_operation_test.cpp
index aca217e67..53c414dec 100644
--- a/src/meta/test/meta_app_operation_test.cpp
+++ b/src/meta/test/meta_app_operation_test.cpp
@@ -128,7 +128,7 @@ public:
                                          int32_t max_replica_count)
     {
         auto app = find_app(app_name);
-        dassert_f(app != nullptr, "app({}) does not exist", app_name);
+        CHECK(app, "app({}) does not exist", app_name);
 
         auto &partition_config = app->partitions[partition_index];
         partition_config.max_replica_count = max_replica_count;
@@ -137,7 +137,7 @@ public:
     void set_max_replica_count_env(const std::string &app_name, const 
std::string &env)
     {
         auto app = find_app(app_name);
-        dassert_f(app != nullptr, "app({}) does not exist", app_name);
+        CHECK(app, "app({}) does not exist", app_name);
 
         if (env.empty()) {
             app->envs.erase(replica_envs::UPDATE_MAX_REPLICA_COUNT);
@@ -176,7 +176,7 @@ public:
                                                       int32_t 
max_replica_count)
     {
         auto app = find_app(app_name);
-        dassert_f(app != nullptr, "app({}) does not exist", app_name);
+        CHECK(app, "app({}) does not exist", app_name);
 
         auto partition_size = static_cast<int>(app->partitions.size());
         for (int i = 0; i < partition_size; ++i) {
diff --git a/src/redis_protocol/proxy_lib/proxy_layer.cpp 
b/src/redis_protocol/proxy_lib/proxy_layer.cpp
index 88e939828..3172eff3f 100644
--- a/src/redis_protocol/proxy_lib/proxy_layer.cpp
+++ b/src/redis_protocol/proxy_lib/proxy_layer.cpp
@@ -21,6 +21,7 @@
 
 #include <rrdb/rrdb.code.definition.h>
 #include "proxy_layer.h"
+#include "utils/fmt_logging.h"
 
 namespace pegasus {
 namespace proxy {
@@ -105,7 +106,7 @@ void proxy_stub::remove_session(dsn::rpc_address 
remote_address)
 proxy_session::proxy_session(proxy_stub *op, dsn::message_ex *first_msg)
     : _stub(op), _is_session_reset(false), _backup_one_request(first_msg)
 {
-    dassert(first_msg != nullptr, "null msg when create session");
+    CHECK_NOTNULL(first_msg, "null msg when create session");
     _backup_one_request->add_ref();
 
     _remote_address = _backup_one_request->header->from_address;
diff --git a/src/redis_protocol/proxy_lib/redis_parser.cpp 
b/src/redis_protocol/proxy_lib/redis_parser.cpp
index f4607d4c4..fdff8b70c 100644
--- a/src/redis_protocol/proxy_lib/redis_parser.cpp
+++ b/src/redis_protocol/proxy_lib/redis_parser.cpp
@@ -103,12 +103,11 @@ void redis_parser::prepare_current_buffer()
     void *msg_buffer;
     if (_current_buffer == nullptr) {
         dsn::message_ex *first_msg = _recv_buffers.front();
-        dassert(
-            first_msg->read_next(&msg_buffer, &_current_buffer_length),
-            "read dsn::message_ex* failed, msg from_address = %s, to_address = 
%s, rpc_name = %s",
-            first_msg->header->from_address.to_string(),
-            first_msg->to_address.to_string(),
-            first_msg->header->rpc_name);
+        CHECK(first_msg->read_next(&msg_buffer, &_current_buffer_length),
+              "read dsn::message_ex* failed, msg from_address = {}, to_address 
= {}, rpc_name = {}",
+              first_msg->header->from_address.to_string(),
+              first_msg->to_address.to_string(),
+              first_msg->header->rpc_name);
         _current_buffer = static_cast<char *>(msg_buffer);
         _current_cursor = 0;
     } else if (_current_cursor >= _current_buffer_length) {
@@ -369,7 +368,7 @@ void redis_parser::reply_all_ready()
     std::vector<dsn::message_ex *> ready_responses;
     fetch_and_dequeue_messages(ready_responses, true);
     for (dsn::message_ex *m : ready_responses) {
-        dassert(m != nullptr, "");
+        CHECK(m, "");
         dsn_rpc_reply(m, ::dsn::ERR_OK);
         // added when message is created
         m->release_ref();
@@ -1355,10 +1354,10 @@ void 
redis_parser::redis_simple_string::marshalling(::dsn::binary_writer &write_
 
 void redis_parser::redis_bulk_string::marshalling(::dsn::binary_writer 
&write_stream) const
 {
-    dassert_f((-1 == length && data.length() == 0) || data.length() == length,
-              "{} VS {}",
-              data.length(),
-              length);
+    CHECK((-1 == length && data.length() == 0) || data.length() == length,
+          "{} VS {}",
+          data.length(),
+          length);
     write_stream.write_pod('$');
     std::string length_str = std::to_string(length);
     write_stream.write(length_str.c_str(), (int)length_str.length());
@@ -1373,10 +1372,10 @@ void 
redis_parser::redis_bulk_string::marshalling(::dsn::binary_writer &write_st
 
 void redis_parser::redis_array::marshalling(::dsn::binary_writer 
&write_stream) const
 {
-    dassert_f((-1 == count && array.size() == 0) || array.size() == count,
-              "{} VS {}",
-              array.size(),
-              count);
+    CHECK((-1 == count && array.size() == 0) || array.size() == count,
+          "{} VS {}",
+          array.size(),
+          count);
     write_stream.write_pod('*');
     std::string count_str = std::to_string(count);
     write_stream.write(count_str.c_str(), (int)count_str.length());
diff --git a/src/replica/duplication/replica_follower.cpp 
b/src/replica/duplication/replica_follower.cpp
index cedc842e0..186de950b 100644
--- a/src/replica/duplication/replica_follower.cpp
+++ b/src/replica/duplication/replica_follower.cpp
@@ -55,10 +55,10 @@ void replica_follower::init_master_info()
     const auto &meta_list_str = 
envs.at(duplication_constants::kDuplicationEnvMasterMetasKey);
     std::vector<std::string> metas;
     boost::split(metas, meta_list_str, boost::is_any_of(","));
-    dassert_f(!metas.empty(), "master cluster meta list is invalid!");
+    CHECK(!metas.empty(), "master cluster meta list is invalid!");
     for (const auto &meta : metas) {
         dsn::rpc_address node;
-        dassert_f(node.from_string_ipv4(meta.c_str()), "{} is invalid meta 
address", meta);
+        CHECK(node.from_string_ipv4(meta.c_str()), "{} is invalid meta 
address", meta);
         _master_meta_list.emplace_back(std::move(node));
     }
 }
diff --git a/src/replica/mutation_log.cpp b/src/replica/mutation_log.cpp
index d32899547..3dca029d7 100644
--- a/src/replica/mutation_log.cpp
+++ b/src/replica/mutation_log.cpp
@@ -967,7 +967,8 @@ error_code mutation_log::reset_from(const std::string &dir,
             if (!utils::filesystem::rename_path(temp_dir, _dir)) {
                 // rollback failed means old log files are not be recovered, 
it may be lost if only
                 // LOG_ERROR,  dassert for manual resolve it
-                dassert_f("rollback {} to {} failed", temp_dir, _dir);
+                // TODO(yingchun): will be fixed later
+                // CHECK(false, "rollback {} to {} failed", temp_dir, _dir);
             }
         } else {
             if (!dsn::utils::filesystem::remove_path(temp_dir)) {
diff --git a/src/replica/replication_app_base.cpp 
b/src/replica/replication_app_base.cpp
index f3f7c554c..f77dc248c 100644
--- a/src/replica/replication_app_base.cpp
+++ b/src/replica/replication_app_base.cpp
@@ -75,7 +75,7 @@ error_code write_blob_to_file(const std::string &file, const 
blob &data)
                                        sz = s;
                                    },
                                    0);
-    dassert_f(tsk, "create file::write task failed");
+    CHECK_NOTNULL(tsk, "create file::write task failed");
     tracker.wait_outstanding_tasks();
     file::flush(hfile);
     file::close(hfile);
@@ -95,7 +95,7 @@ error_code write_blob_to_file(const std::string &file, const 
blob &data)
 error_code replica_init_info::load(const std::string &dir)
 {
     std::string info_path = utils::filesystem::path_combine(dir, kInitInfo);
-    dassert_f(utils::filesystem::path_exists(info_path), "file({}) not exist", 
info_path);
+    CHECK(utils::filesystem::path_exists(info_path), "file({}) not exist", 
info_path);
     ERR_LOG_AND_RETURN_NOT_OK(
         load_json(info_path), "load replica_init_info from {} failed", 
info_path);
     LOG_INFO_F("load replica_init_info from {} succeed: {}", info_path, 
to_string());
@@ -281,9 +281,8 @@ error_code replication_app_base::open_new_internal(replica 
*r,
                                                    int64_t shared_log_start,
                                                    int64_t private_log_start)
 {
-    dassert_f(utils::filesystem::remove_path(_dir_data), "remove data dir {} 
failed", _dir_data);
-    dassert_f(
-        utils::filesystem::create_directory(_dir_data), "create data dir {} 
failed", _dir_data);
+    CHECK(utils::filesystem::remove_path(_dir_data), "remove data dir {} 
failed", _dir_data);
+    CHECK(utils::filesystem::create_directory(_dir_data), "create data dir {} 
failed", _dir_data);
     ERR_LOG_AND_RETURN_NOT_TRUE(utils::filesystem::directory_exists(_dir_data),
                                 ERR_FILE_OPERATION_FAILED,
                                 "[{}]: create replica data dir {} failed",
diff --git a/src/reporter/pegasus_counter_reporter.cpp 
b/src/reporter/pegasus_counter_reporter.cpp
index 485b37a91..a5b1fa1a7 100644
--- a/src/reporter/pegasus_counter_reporter.cpp
+++ b/src/reporter/pegasus_counter_reporter.cpp
@@ -355,7 +355,7 @@ void 
pegasus_counter_reporter::on_report_timer(std::shared_ptr<boost::asio::dead
         timer->async_wait(std::bind(
             &pegasus_counter_reporter::on_report_timer, this, timer, 
std::placeholders::_1));
     } else if (boost::system::errc::operation_canceled != ec) {
-        dassert(false, "pegasus report timer error!!!");
+        CHECK(false, "pegasus report timer error!!!");
     }
 }
 } // namespace server
diff --git a/src/runtime/rpc/thrift_message_parser.cpp 
b/src/runtime/rpc/thrift_message_parser.cpp
index 918d92518..2987da143 100644
--- a/src/runtime/rpc/thrift_message_parser.cpp
+++ b/src/runtime/rpc/thrift_message_parser.cpp
@@ -296,7 +296,7 @@ message_ex 
*thrift_message_parser::get_message_on_receive(message_reader *reader
     case 1:
         return parse_request_body_v1(reader, read_next);
     default:
-        dassert_f(false, "invalid header version: {}", _header_version);
+        CHECK(false, "invalid header version: {}", _header_version);
     }
 
     return nullptr;
diff --git a/src/runtime/security/kinit_context.cpp 
b/src/runtime/security/kinit_context.cpp
index df7ce3b87..24c0dab21 100644
--- a/src/runtime/security/kinit_context.cpp
+++ b/src/runtime/security/kinit_context.cpp
@@ -269,7 +269,7 @@ void kinit_context::schedule_renew_credentials()
         } else if (err == boost::system::errc::operation_canceled) {
             LOG_WARNING("the renew credentials timer is cancelled");
         } else {
-            dassert_f(false, "unhandled error({})", err.message());
+            CHECK(false, "unhandled error({})", err.message());
         }
     });
 }
diff --git a/src/runtime/security/sasl_init.cpp 
b/src/runtime/security/sasl_init.cpp
index 843b94f4d..864fba1dd 100644
--- a/src/runtime/security/sasl_init.cpp
+++ b/src/runtime/security/sasl_init.cpp
@@ -80,7 +80,7 @@ int sasl_get_username(void *context, int id, const char 
**result, unsigned *len)
         }
         return SASL_OK;
     default:
-        dassert_f(false, "unexpected SASL callback type: {}", id);
+        CHECK(false, "unexpected SASL callback type: {}", id);
         return SASL_BADPARAM;
     }
 }
diff --git a/src/runtime/service_engine.cpp b/src/runtime/service_engine.cpp
index 6e0dac2bc..10435d88a 100644
--- a/src/runtime/service_engine.cpp
+++ b/src/runtime/service_engine.cpp
@@ -233,12 +233,12 @@ void service_engine::start_node(service_app_spec 
&app_spec)
             // union to existing node if any port is shared
             auto it = app_name_by_port.find(p);
             if (it != app_name_by_port.end()) {
-                dassert_f(false,
-                          "network port {} usage confliction for {} vs {}, "
-                          "please reconfig",
-                          p,
-                          it->second,
-                          app_spec.full_name);
+                CHECK(false,
+                      "network port {} usage confliction for {} vs {}, "
+                      "please reconfig",
+                      p,
+                      it->second,
+                      app_spec.full_name);
             }
             app_name_by_port.emplace(p, app_spec.full_name);
         }
diff --git a/src/server/available_detector.cpp 
b/src/server/available_detector.cpp
index f1dfa4738..6ee734edb 100644
--- a/src/server/available_detector.cpp
+++ b/src/server/available_detector.cpp
@@ -79,13 +79,13 @@ available_detector::available_detector()
                                               "available detect timeout");
     // initialize the _client.
     if (!pegasus_client_factory::initialize(nullptr)) {
-        dassert(false, "Initialize the pegasus client failed");
+        CHECK(false, "Initialize the pegasus client failed");
     }
     _client = pegasus_client_factory::get_client(_cluster_name.c_str(), 
_app_name.c_str());
-    dassert(_client != nullptr, "Initialize the _client failed");
+    CHECK_NOTNULL(_client, "Initialize the _client failed");
     _result_writer = dsn::make_unique<result_writer>(_client);
     _ddl_client.reset(new replication_ddl_client(_meta_list));
-    dassert(_ddl_client != nullptr, "Initialize the _ddl_client failed");
+    CHECK_NOTNULL(_ddl_client, "Initialize the _ddl_client failed");
     if (!_alert_email_address.empty()) {
         _send_alert_email_cmd = "cd " + _alert_script_dir + "; bash 
sendmail.sh alert " +
                                 _alert_email_address + " " + _cluster_name + " 
" + _app_name + " ";
diff --git a/src/server/capacity_unit_calculator.cpp 
b/src/server/capacity_unit_calculator.cpp
index 8f2574fd6..0bde5f961 100644
--- a/src/server/capacity_unit_calculator.cpp
+++ b/src/server/capacity_unit_calculator.cpp
@@ -23,6 +23,7 @@
 #include "utils/token_bucket_throttling_controller.h"
 #include <rocksdb/status.h>
 #include "hotkey_collector.h"
+#include "utils/fmt_logging.h"
 
 namespace pegasus {
 namespace server {
@@ -37,10 +38,9 @@ capacity_unit_calculator::capacity_unit_calculator(
       _write_hotkey_collector(write_hotkey_collector),
       _read_size_throttling_controller(read_size_throttling_controller)
 {
-    dassert(_read_hotkey_collector != nullptr, "read hotkey collector is a 
nullptr");
-    dassert(_write_hotkey_collector != nullptr, "write hotkey collector is a 
nullptr");
-    dassert(_read_size_throttling_controller != nullptr,
-            "_read_size_throttling_controller is a nullptr");
+    CHECK(_read_hotkey_collector, "read hotkey collector is a nullptr");
+    CHECK(_write_hotkey_collector, "write hotkey collector is a nullptr");
+    CHECK(_read_size_throttling_controller, "_read_size_throttling_controller 
is a nullptr");
 
     _read_capacity_unit_size =
         dsn_config_get_value_uint64("pegasus.server",
@@ -52,10 +52,10 @@ capacity_unit_calculator::capacity_unit_calculator(
                                     "perf_counter_write_capacity_unit_size",
                                     4 * 1024,
                                     "capacity unit size of write requests, 
default 4KB");
-    dassert(powerof2(_read_capacity_unit_size),
-            "'perf_counter_read_capacity_unit_size' must be a power of 2");
-    dassert(powerof2(_write_capacity_unit_size),
-            "'perf_counter_write_capacity_unit_size' must be a power of 2");
+    CHECK(powerof2(_read_capacity_unit_size),
+          "'perf_counter_read_capacity_unit_size' must be a power of 2");
+    CHECK(powerof2(_write_capacity_unit_size),
+          "'perf_counter_write_capacity_unit_size' must be a power of 2");
     _log_read_cu_size = log(_read_capacity_unit_size) / log(2);
     _log_write_cu_size = log(_write_capacity_unit_size) / log(2);
 
diff --git a/src/server/hotkey_collector.cpp b/src/server/hotkey_collector.cpp
index 996565ef8..0972b18f2 100644
--- a/src/server/hotkey_collector.cpp
+++ b/src/server/hotkey_collector.cpp
@@ -280,7 +280,7 @@ void 
hotkey_collector::on_start_detect(dsn::replication::detect_hotkey_response
         resp.err = dsn::ERR_INVALID_STATE;
         resp.__set_err_hint(hint);
         LOG_ERROR_PREFIX(hint);
-        dassert(false, "invalid collector state");
+        CHECK(false, "invalid collector state");
     }
     resp.__set_err_hint(hint);
     LOG_WARNING_PREFIX(hint);
diff --git a/src/server/info_collector.cpp b/src/server/info_collector.cpp
index 1885a8092..33eedd809 100644
--- a/src/server/info_collector.cpp
+++ b/src/server/info_collector.cpp
@@ -69,13 +69,11 @@ info_collector::info_collector()
 
     _usage_stat_app = dsn_config_get_value_string(
         "pegasus.collector", "usage_stat_app", "", "app for recording usage 
statistics");
-    dassert(!_usage_stat_app.empty(), "");
+    CHECK(!_usage_stat_app.empty(), "");
     // initialize the _client.
-    if (!pegasus_client_factory::initialize(nullptr)) {
-        dassert(false, "Initialize the pegasus client failed");
-    }
+    CHECK(pegasus_client_factory::initialize(nullptr), "Initialize the pegasus 
client failed");
     _client = pegasus_client_factory::get_client(_cluster_name.c_str(), 
_usage_stat_app.c_str());
-    dassert(_client != nullptr, "Initialize the client failed");
+    CHECK_NOTNULL(_client, "Initialize the client failed");
     _result_writer = dsn::make_unique<result_writer>(_client);
 
     _capacity_unit_fetch_interval_seconds =
diff --git a/src/server/meta_store.cpp b/src/server/meta_store.cpp
index d65f1c004..71581a4da 100644
--- a/src/server/meta_store.cpp
+++ b/src/server/meta_store.cpp
@@ -104,10 +104,10 @@ std::string meta_store::get_usage_scenario() const
     if (ec != ::dsn::ERR_OK) {
         return ec;
     }
-    dassert_f(dsn::buf2uint64(data, *value),
-              "rocksdb {} get \"{}\" from meta column family failed to parse 
into uint64",
-              db->GetName(),
-              data);
+    CHECK(dsn::buf2uint64(data, *value),
+          "rocksdb {} get \"{}\" from meta column family failed to parse into 
uint64",
+          db->GetName(),
+          data);
     return ::dsn::ERR_OK;
 }
 
diff --git a/src/server/pegasus_server_impl.cpp 
b/src/server/pegasus_server_impl.cpp
index 35be06960..899bcb2aa 100644
--- a/src/server/pegasus_server_impl.cpp
+++ b/src/server/pegasus_server_impl.cpp
@@ -114,7 +114,7 @@ void pegasus_server_impl::parse_checkpoints()
 pegasus_server_impl::~pegasus_server_impl()
 {
     if (_is_open) {
-        dassert(_db != nullptr, "");
+        CHECK_NOTNULL(_db, "");
         release_db();
     }
 }
@@ -258,15 +258,15 @@ int 
pegasus_server_impl::on_batched_write_requests(int64_t decree,
                                                    dsn::message_ex **requests,
                                                    int count)
 {
-    dassert(_is_open, "");
-    dassert(requests != nullptr, "");
+    CHECK(_is_open, "");
+    CHECK_NOTNULL(requests, "");
 
     return _server_write->on_batched_write_requests(requests, count, decree, 
timestamp);
 }
 
 void pegasus_server_impl::on_get(get_rpc rpc)
 {
-    dassert(_is_open, "");
+    CHECK(_is_open, "");
     _pfc_get_qps->increment();
     uint64_t start_time = dsn_now_ns();
 
@@ -350,7 +350,7 @@ void pegasus_server_impl::on_get(get_rpc rpc)
 
 void pegasus_server_impl::on_multi_get(multi_get_rpc rpc)
 {
-    dassert(_is_open, "");
+    CHECK(_is_open, "");
     _pfc_multi_get_qps->increment();
     uint64_t start_time = dsn_now_ns();
 
@@ -780,7 +780,7 @@ void pegasus_server_impl::on_multi_get(multi_get_rpc rpc)
 
 void pegasus_server_impl::on_batch_get(batch_get_rpc rpc)
 {
-    dassert(_is_open, "");
+    CHECK(_is_open, "");
     _pfc_batch_get_qps->increment();
     int64_t start_time = dsn_now_ns();
 
@@ -897,7 +897,7 @@ void pegasus_server_impl::on_batch_get(batch_get_rpc rpc)
 
 void pegasus_server_impl::on_sortkey_count(sortkey_count_rpc rpc)
 {
-    dassert(_is_open, "");
+    CHECK(_is_open, "");
 
     _pfc_scan_qps->increment();
     uint64_t start_time = dsn_now_ns();
@@ -981,7 +981,7 @@ void 
pegasus_server_impl::on_sortkey_count(sortkey_count_rpc rpc)
 
 void pegasus_server_impl::on_ttl(ttl_rpc rpc)
 {
-    dassert(_is_open, "");
+    CHECK(_is_open, "");
 
     const auto &key = rpc.request();
     auto &resp = rpc.response();
@@ -1048,7 +1048,7 @@ void pegasus_server_impl::on_ttl(ttl_rpc rpc)
 
 void pegasus_server_impl::on_get_scanner(get_scanner_rpc rpc)
 {
-    dassert(_is_open, "");
+    CHECK(_is_open, "");
     _pfc_scan_qps->increment();
     uint64_t start_time = dsn_now_ns();
 
@@ -1121,9 +1121,9 @@ void pegasus_server_impl::on_get_scanner(get_scanner_rpc 
rpc)
             // hashkey, we should not seek this prefix by prefix bloom filter. 
However, it only
             // happen when do full scan (scanners got by 
get_unordered_scanners), in which case the
             // following flags has been updated.
-            dassert(!_data_cf_opts.prefix_extractor || 
rd_opts.total_order_seek, "Invalid option");
-            dassert(!_data_cf_opts.prefix_extractor || 
!rd_opts.prefix_same_as_start,
-                    "Invalid option");
+            CHECK(!_data_cf_opts.prefix_extractor || rd_opts.total_order_seek, 
"Invalid option");
+            CHECK(!_data_cf_opts.prefix_extractor || 
!rd_opts.prefix_same_as_start,
+                  "Invalid option");
         }
     }
 
@@ -1312,7 +1312,7 @@ void pegasus_server_impl::on_get_scanner(get_scanner_rpc 
rpc)
 
 void pegasus_server_impl::on_scan(scan_rpc rpc)
 {
-    dassert(_is_open, "");
+    CHECK(_is_open, "");
     _pfc_scan_qps->increment();
     uint64_t start_time = dsn_now_ns();
     const auto &request = rpc.request();
@@ -1761,7 +1761,7 @@ dsn::error_code pegasus_server_impl::start(int argc, char 
**argv)
 void pegasus_server_impl::cancel_background_work(bool wait)
 {
     if (_is_open) {
-        dassert(_db != nullptr, "");
+        CHECK_NOTNULL(_db, "");
         rocksdb::CancelAllBackgroundWork(_db, wait);
     }
 }
@@ -2132,7 +2132,7 @@ private:
                                                       const dsn::blob 
&learn_request,
                                                       
dsn::replication::learn_state &state)
 {
-    dassert(_is_open, "");
+    CHECK(_is_open, "");
 
     int64_t ci = last_durable_decree();
     if (ci == 0) {
diff --git a/src/server/pegasus_server_impl_init.cpp 
b/src/server/pegasus_server_impl_init.cpp
index 3f0784055..aa17fd647 100644
--- a/src/server/pegasus_server_impl_init.cpp
+++ b/src/server/pegasus_server_impl_init.cpp
@@ -335,15 +335,15 @@ 
pegasus_server_impl::pegasus_server_impl(dsn::replication::replica *r)
         "for all level 2 and higher levels, and "
         "'per_level:[none|snappy|zstd|lz4],[none|snappy|zstd|lz4],...' for 
each level 0,1,..., the "
         "last compression type will be used for levels not specified in the 
list.");
-    dassert(parse_compression_types(compression_str, 
_data_cf_opts.compression_per_level),
-            "parse rocksdb_compression_type failed.");
+    CHECK(parse_compression_types(compression_str, 
_data_cf_opts.compression_per_level),
+          "parse rocksdb_compression_type failed.");
 
     _meta_cf_opts = _data_cf_opts;
     // Set level0_file_num_compaction_trigger of meta CF as 10 to reduce 
frequent compaction.
     _meta_cf_opts.level0_file_num_compaction_trigger = 10;
     // Data in meta CF is very little, disable compression to save CPU load.
-    dassert(parse_compression_types("none", 
_meta_cf_opts.compression_per_level),
-            "parse rocksdb_compression_type failed.");
+    CHECK(parse_compression_types("none", _meta_cf_opts.compression_per_level),
+          "parse rocksdb_compression_type failed.");
 
     rocksdb::BlockBasedTableOptions tbl_opts;
     tbl_opts.read_amp_bytes_per_bit = FLAGS_read_amp_bytes_per_bit;
@@ -560,8 +560,8 @@ 
pegasus_server_impl::pegasus_server_impl(dsn::replication::replica *r)
                                             "2 is the old version, 5 is the 
new "
                                             "version supported since rocksdb "
                                             "v6.6.4");
-        dassert(format_version == 2 || format_version == 5,
-                "[pegasus.server]rocksdb_format_version should be either '2' 
or '5'.");
+        CHECK(format_version == 2 || format_version == 5,
+              "[pegasus.server]rocksdb_format_version should be either '2' or 
'5'.");
         tbl_opts.format_version = format_version;
         
tbl_opts.filter_policy.reset(rocksdb::NewBloomFilterPolicy(bits_per_key, 
false));
 
@@ -570,8 +570,8 @@ 
pegasus_server_impl::pegasus_server_impl(dsn::replication::replica *r)
                                         "rocksdb_filter_type",
                                         "prefix",
                                         "Bloom filter type, should be either 
'common' or 'prefix'");
-        dassert(filter_type == "common" || filter_type == "prefix",
-                "[pegasus.server]rocksdb_filter_type should be either 'common' 
or 'prefix'.");
+        CHECK(filter_type == "common" || filter_type == "prefix",
+              "[pegasus.server]rocksdb_filter_type should be either 'common' 
or 'prefix'.");
         if (filter_type == "prefix") {
             _data_cf_opts.prefix_extractor.reset(new HashkeyTransform());
             _data_cf_opts.memtable_prefix_bloom_size_ratio = 0.1;
diff --git a/src/server/pegasus_server_write.cpp 
b/src/server/pegasus_server_write.cpp
index f59b25593..f3d8b21eb 100644
--- a/src/server/pegasus_server_write.cpp
+++ b/src/server/pegasus_server_write.cpp
@@ -84,7 +84,7 @@ int pegasus_server_write::on_batched_writes(dsn::message_ex 
**requests, int coun
         _write_svc->batch_prepare(_decree);
 
         for (int i = 0; i < count; ++i) {
-            dassert(requests[i] != nullptr, "request[%d] is null", i);
+            CHECK_NOTNULL(requests[i], "request[{}] is null", i);
 
             // Make sure all writes are batched even if they are failed,
             // since we need to record the total qps and rpc latencies,
diff --git a/src/server/pegasus_write_service.cpp 
b/src/server/pegasus_write_service.cpp
index 4134b7ccc..dc0a24333 100644
--- a/src/server/pegasus_write_service.cpp
+++ b/src/server/pegasus_write_service.cpp
@@ -286,7 +286,7 @@ int pegasus_write_service::batch_commit(int64_t decree)
 void pegasus_write_service::batch_abort(int64_t decree, int err)
 {
     dassert(_batch_start_time != 0, "batch_abort must be called after 
batch_prepare");
-    dassert(err, "must abort on non-zero err");
+    CHECK(err, "must abort on non-zero err");
 
     _impl->batch_abort(decree, err);
     clear_up_batch_states();
diff --git a/src/server/pegasus_write_service_impl.h 
b/src/server/pegasus_write_service_impl.h
index 6d28f3950..70b90cc29 100644
--- a/src/server/pegasus_write_service_impl.h
+++ b/src/server/pegasus_write_service_impl.h
@@ -674,7 +674,7 @@ private:
             }
         }
         default:
-            dassert(false, "unsupported check type: %d", check_type);
+            CHECK(false, "unsupported check type: {}", check_type);
         }
         return false;
     }
diff --git a/src/shell/command_helper.h b/src/shell/command_helper.h
index 98c204eb7..fb5bcef6f 100644
--- a/src/shell/command_helper.h
+++ b/src/shell/command_helper.h
@@ -258,7 +258,7 @@ inline bool 
validate_filter(pegasus::pegasus_client::filter_type filter_type,
         }
     }
     default:
-        dassert(false, "unsupported filter type: %d", filter_type);
+        LOG_FATAL_F("unsupported filter type: {}", filter_type);
     }
     return false;
 }
@@ -571,7 +571,7 @@ inline void scan_data_next(scan_data_context *context)
                         }
                         break;
                     default:
-                        dassert(false, "op = %d", context->op);
+                        LOG_FATAL_F("op = {}", context->op);
                         break;
                     }
                 } else {
diff --git a/src/shell/commands/debugger.cpp b/src/shell/commands/debugger.cpp
index 080b9570b..571ed0afe 100644
--- a/src/shell/commands/debugger.cpp
+++ b/src/shell/commands/debugger.cpp
@@ -90,7 +90,7 @@ bool mlog_dump(command_executor *e, shell_context *sc, 
arguments args)
             int64_t decree, int64_t timestamp, dsn::message_ex **requests, int 
count) mutable {
             for (int i = 0; i < count; ++i) {
                 dsn::message_ex *request = requests[i];
-                dassert(request != nullptr, "");
+                CHECK_NOTNULL(request, "");
                 ::dsn::message_ex *msg = (::dsn::message_ex *)request;
                 if (msg->local_rpc_code == RPC_REPLICATION_WRITE_EMPTY) {
                     os << INDENT << "[EMPTY]" << std::endl;
diff --git a/src/shell/commands/table_management.cpp 
b/src/shell/commands/table_management.cpp
index 85aa5b621..f78a9a755 100644
--- a/src/shell/commands/table_management.cpp
+++ b/src/shell/commands/table_management.cpp
@@ -260,7 +260,7 @@ bool app_disk(command_executor *e, shell_context *sc, 
arguments args)
             std::string counter_name;
             bool parse_ret = parse_app_pegasus_perf_counter_name(
                 m.name, app_id_x, partition_index_x, counter_name);
-            dassert(parse_ret, "name = %s", m.name.c_str());
+            CHECK(parse_ret, "name = {}", m.name);
             if (m.name.find("sst(MB)") != std::string::npos) {
                 disk_map[nodes[i].address][partition_index_x] = m.value;
             } else if (m.name.find("sst.count") != std::string::npos) {
diff --git a/src/shell/main.cpp b/src/shell/main.cpp
index 32a67d36f..d58340cf1 100644
--- a/src/shell/main.cpp
+++ b/src/shell/main.cpp
@@ -567,7 +567,7 @@ void register_all_commands()
 {
     for (int i = 0; commands[i].name != nullptr; ++i) {
         auto pr = s_commands_map.emplace(commands[i].name, &commands[i]);
-        dassert(pr.second, "the command '%s' is already registered!!!", 
commands[i].name);
+        CHECK(pr.second, "the command '{}' is already registered!!!", 
commands[i].name);
         s_max_name_length = std::max(s_max_name_length, 
strlen(commands[i].name));
     }
 }
diff --git a/src/test/bench_test/benchmark.cpp 
b/src/test/bench_test/benchmark.cpp
index 397042a73..0d45bf232 100644
--- a/src/test/bench_test/benchmark.cpp
+++ b/src/test/bench_test/benchmark.cpp
@@ -33,7 +33,7 @@ benchmark::benchmark()
 {
     _client = 
pegasus_client_factory::get_client(config::instance().pegasus_cluster_name.c_str(),
                                                  
config::instance().pegasus_app_name.c_str());
-    dassert_f(_client, "");
+    CHECK_NOTNULL(_client, "");
 
     // init operation method map
     _operation_method = {{kUnknown, nullptr},
@@ -60,7 +60,7 @@ void benchmark::run_benchmark(int thread_count, 
operation_type op_type)
 {
     // get method by operation type
     bench_method method = _operation_method[op_type];
-    dassert_f(method, "");
+    CHECK_NOTNULL(method, "");
 
     // create histogram statistic
     std::shared_ptr<rocksdb::Statistics> hist_stats = 
rocksdb::CreateDBStatistics();
diff --git a/src/test/function_test/utils/global_env.cpp 
b/src/test/function_test/utils/global_env.cpp
index 6d8d6a547..91653bfe4 100644
--- a/src/test/function_test/utils/global_env.cpp
+++ b/src/test/function_test/utils/global_env.cpp
@@ -54,7 +54,7 @@ void global_env::get_dirs()
     std::cout << "Pegasus project root: " << _pegasus_root << std::endl;
 
     char task_target[512] = {0};
-    dassert_f(getcwd(task_target, sizeof(task_target)), "");
+    CHECK(getcwd(task_target, sizeof(task_target)), "");
     _working_dir = task_target;
     std::cout << "working dir: " << _working_dir << std::endl;
 }
@@ -65,6 +65,6 @@ void global_env::get_hostip()
     uint32_t ipnet = htonl(ip);
     char buffer[512] = {0};
     memset(buffer, 0, sizeof(buffer));
-    dassert_f(inet_ntop(AF_INET, &ipnet, buffer, sizeof(buffer)), "");
+    CHECK(inet_ntop(AF_INET, &ipnet, buffer, sizeof(buffer)), "");
     _host_ip = buffer;
 }
diff --git a/src/test/kill_test/process_kill_testor.cpp 
b/src/test/kill_test/process_kill_testor.cpp
index 8090a00ef..c9a67101f 100644
--- a/src/test/kill_test/process_kill_testor.cpp
+++ b/src/test/kill_test/process_kill_testor.cpp
@@ -53,7 +53,7 @@ process_kill_testor::process_kill_testor(const char 
*config_file) : kill_testor(
         dsn_config_get_value_string(section, "killer_handler", "", "killer 
handler");
     dassert(killer_name.size() > 0, "");
     _killer_handler.reset(killer_handler::new_handler(killer_name.c_str()));
-    dassert(_killer_handler.get() != nullptr, "invalid killer_name(%s)", 
killer_name.c_str());
+    CHECK(_killer_handler, "invalid killer_name({})", killer_name);
 
     _job_types = {META, REPLICA, ZOOKEEPER};
     _job_index_to_kill.resize(JOB_LENGTH);
@@ -68,7 +68,7 @@ process_kill_testor::process_kill_testor(const char 
*config_file) : kill_testor(
         section, "total_zookeeper_count", 0, "total zookeeper count");
 
     if (_total_meta_count == 0 && _total_replica_count == 0 && 
_total_zookeeper_count == 0) {
-        dassert(false, "total number of meta/replica/zookeeper is 0");
+        CHECK(false, "total number of meta/replica/zookeeper is 0");
     }
 
     _kill_replica_max_count = (int32_t)dsn_config_get_value_uint64(
@@ -219,7 +219,7 @@ bool process_kill_testor::start_job_by_index(job_type type, 
int index)
 void process_kill_testor::stop_verifier_and_exit(const char *msg)
 {
     system("ps aux | grep pegasus | grep verifier | awk '{print $2}' | xargs 
kill -9");
-    dassert(false, "%s", msg);
+    CHECK(false, "{}", msg);
 }
 
 bool process_kill_testor::check_coredump()
diff --git a/src/test/pressure_test/main.cpp b/src/test/pressure_test/main.cpp
index 9388037dc..7e0394527 100644
--- a/src/test/pressure_test/main.cpp
+++ b/src/test/pressure_test/main.cpp
@@ -24,6 +24,7 @@
 
 #include "utils/api_utilities.h"
 #include "runtime/api_layer1.h"
+#include "utils/fmt_logging.h"
 #include "utils/rand.h"
 #include "runtime/task/async_calls.h"
 
@@ -145,14 +146,12 @@ void test_get(int32_t qps)
                     sortkey,
                     [hashkey, sortkey](int ec, string &&val, 
pegasus_client::internal_info &&info) {
                         if (ec == PERR_OK) {
-                            if (!verify(hashkey, sortkey, val)) {
-                                dassert(false,
-                                        "hashkey(%s) - sortkey(%s) - 
value(%s), but value(%s)",
-                                        hashkey.c_str(),
-                                        sortkey.c_str(),
-                                        get_value(hashkey, sortkey, 
value_len).c_str(),
-                                        val.c_str());
-                            }
+                            CHECK(verify(hashkey, sortkey, val),
+                                  "hashkey({}) - sortkey({}) - value({}), but 
value({})",
+                                  hashkey,
+                                  sortkey,
+                                  get_value(hashkey, sortkey, value_len),
+                                  val);
                         } else if (ec == PERR_NOT_FOUND) {
                             // don't output info
                             // LOG_WARNING("hashkey(%s) - sortkey(%s) doesn't 
exist in the server",
@@ -187,13 +186,11 @@ void test_del(int32_t qps)
                     hashkey,
                     sortkey,
                     [hashkey, sortkey](int ec, pegasus_client::internal_info 
&&info) {
-                        if (ec != PERR_OK && ec != PERR_NOT_FOUND && ec != 
PERR_TIMEOUT) {
-                            dassert(false,
-                                    "del hashkey(%s) - sortkey(%s) failed with 
err(%s)",
-                                    hashkey.c_str(),
-                                    sortkey.c_str(),
-                                    pg_client->get_error_string(ec));
-                        }
+                        CHECK(ec == PERR_OK || ec == PERR_NOT_FOUND || ec == 
PERR_TIMEOUT,
+                              "del hashkey({}) - sortkey({}) failed with 
err({})",
+                              hashkey,
+                              sortkey,
+                              pg_client->get_error_string(ec));
                     });
                 cnt -= 1;
             }
@@ -202,7 +199,7 @@ void test_del(int32_t qps)
     quota_task->cancel(false);
 }
 
-void test_scan(int32_t qps) { dassert(false, "not implemented"); }
+void test_scan(int32_t qps) { CHECK(false, "not implemented"); }
 
 static std::map<std::string, std::function<void(int32_t)>> _all_funcs;
 
@@ -253,20 +250,19 @@ int main(int argc, const char **argv)
         (int32_t)dsn_config_get_value_uint64("pressureclient", "value_len", 
64, "value length");
 
     dassert(qps > 0, "qps must GT 0, but qps(%d)", qps);
-    dassert(!op_name.empty(), "must assign operation name");
+    CHECK(!op_name.empty(), "must assign operation name");
 
     LOG_INFO("pressureclient %s qps = %d", op_name.c_str(), qps);
 
     pg_client = pegasus_client_factory::get_client(cluster_name.c_str(), 
app_name.c_str());
-
-    dassert(pg_client != nullptr, "initialize pg_client failed");
+    CHECK_NOTNULL(pg_client, "initialize pg_client failed");
 
     auto it = _all_funcs.find(op_name);
     if (it != _all_funcs.end()) {
         LOG_INFO("start pressureclient with %s qps(%d)", op_name.c_str(), qps);
         it->second(qps);
     } else {
-        dassert(false, "Unknown operation name(%s)", op_name.c_str());
+        CHECK(false, "Unknown operation name({})", op_name);
     }
     return 0;
 }
diff --git a/src/utils/flags.cpp b/src/utils/flags.cpp
index b7a313c3c..53c2f997f 100644
--- a/src/utils/flags.cpp
+++ b/src/utils/flags.cpp
@@ -61,7 +61,7 @@ public:
     case type_enum:                                                                               \
         value<type>() = dsn_config_get_value_##suffix(_section, _name, value<type>(), _desc);     \
         if (_validator) {                                                                         \
-            dassert_f(_validator(), "validation failed: {}", _name);                              \
+            CHECK(_validator(), "validation failed: {}", _name);                                  \
         }                                                                                         \
         break
 
@@ -289,7 +289,7 @@ public:
 
         std::string total_message;
         if (!run_group_validators(&total_message)) {
-            dassert_f(false, "{}", total_message);
+            CHECK(false, "{}", total_message);
         }
     }
 
diff --git a/src/utils/fmt_logging.h b/src/utils/fmt_logging.h
index e5f0a1a29..33d843bd6 100644
--- a/src/utils/fmt_logging.h
+++ b/src/utils/fmt_logging.h
@@ -46,6 +46,9 @@
         }                                                                                         \
     } while (false)
 
+#define CHECK dassert_f
+#define CHECK_NOTNULL(p, ...) CHECK(p != nullptr, __VA_ARGS__)
+
 // Macros for writing log message prefixed by log_prefix().
 #define LOG_DEBUG_PREFIX(...) LOG_DEBUG_F("[{}] {}", log_prefix(), fmt::format(__VA_ARGS__))
 #define LOG_INFO_PREFIX(...) LOG_INFO_F("[{}] {}", log_prefix(), fmt::format(__VA_ARGS__))
diff --git a/src/utils/metrics.cpp b/src/utils/metrics.cpp
index 00efd008f..59dee0af8 100644
--- a/src/utils/metrics.cpp
+++ b/src/utils/metrics.cpp
@@ -249,9 +249,9 @@ void percentile_timer::on_timer(const boost::system::error_code &ec)
 
         // Cancel can only be launched by close().
         auto expected_state = state::kClosing;
-        dassert_f(_state.compare_exchange_strong(expected_state, state::kClosed),
-                  "wrong state for percentile_timer: {}, while expecting closing state",
-                  static_cast<int>(expected_state));
+        CHECK(_state.compare_exchange_strong(expected_state, state::kClosed),
+              "wrong state for percentile_timer: {}, while expecting closing state",
+              static_cast<int>(expected_state));
         on_close();
 
         return;
diff --git a/src/utils/nth_element.h b/src/utils/nth_element.h
index 591cba2a1..e15893358 100644
--- a/src/utils/nth_element.h
+++ b/src/utils/nth_element.h
@@ -70,9 +70,9 @@ public:
     void set_nths(const nth_container_type &nths)
     {
         _nths = nths;
-        dassert_f(std::is_sorted(_nths.begin(), _nths.end()),
-                  "nth indexes({}) is not sorted",
-                  fmt::join(_nths, " "));
+        CHECK(std::is_sorted(_nths.begin(), _nths.end()),
+              "nth indexes({}) is not sorted",
+              fmt::join(_nths, " "));
 
         _elements.assign(_nths.size(), value_type{});
     }
diff --git a/src/utils/test/metrics_test.cpp b/src/utils/test/metrics_test.cpp
index 352682ba0..2237083a7 100644
--- a/src/utils/test/metrics_test.cpp
+++ b/src/utils/test/metrics_test.cpp
@@ -674,11 +674,11 @@ void run_percentile(const metric_entity_ptr &my_entity,
                     Checker checker)
 {
     dassert_f(num_threads > 0, "Invalid num_threads({})", num_threads);
-    dassert_f(data.size() <= sample_size && data.size() % num_threads == 0,
-              "Invalid arguments, data_size={}, sample_size={}, num_threads={}",
-              data.size(),
-              sample_size,
-              num_threads);
+    CHECK(data.size() <= sample_size && data.size() % num_threads == 0,
+          "Invalid arguments, data_size={}, sample_size={}, num_threads={}",
+          data.size(),
+          sample_size,
+          num_threads);
 
     auto my_metric = prototype.instantiate(my_entity, interval_ms, kth_percentiles, sample_size);
 
diff --git a/src/utils/test/nth_element_utils.h b/src/utils/test/nth_element_utils.h
index 00078eb75..09e803b7a 100644
--- a/src/utils/test/nth_element_utils.h
+++ b/src/utils/test/nth_element_utils.h
@@ -59,13 +59,12 @@ public:
           _nths(nths),
           _rand(Rand())
     {
-        dassert_f(std::is_sorted(_nths.begin(), _nths.end()),
-                  "nth indexes({}) is not sorted",
-                  fmt::join(_nths, " "));
+        CHECK(std::is_sorted(_nths.begin(), _nths.end()),
+              "nth indexes({}) is not sorted",
+              fmt::join(_nths, " "));
 
         for (const auto &nth : _nths) {
-            dassert_f(
-                nth >= 0 && nth < _array_size, "nth should be in the range [0, {})", _array_size);
+            CHECK(nth >= 0 && nth < _array_size, "nth should be in the range [0, {})", _array_size);
         }
     }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to