This is an automated email from the ASF dual-hosted git repository.
zhaoliwei pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-pegasus.git
The following commit(s) were added to refs/heads/master by this push:
new 3598aa6cc refactor(conf): use DSN_DEFINE_uint64 to load uint64 type of
configs (#1359)
3598aa6cc is described below
commit 3598aa6cc26630646417b36354a3e886a56bc17e
Author: Yingchun Lai <[email protected]>
AuthorDate: Fri Feb 24 10:50:34 2023 +0800
refactor(conf): use DSN_DEFINE_uint64 to load uint64 type of configs (#1359)
---
src/block_service/test/fds_service_test.cpp | 7 +-
src/failure_detector/test/failure_detector.cpp | 7 +-
src/meta/meta_options.cpp | 17 --
src/meta/meta_options.h | 4 -
src/meta/meta_server_failure_detector.cpp | 9 +-
src/meta/meta_service.cpp | 10 +-
src/meta/partition_guardian.cpp | 13 +-
src/meta/test/meta_app_operation_test.cpp | 10 +-
src/meta/test/update_configuration_test.cpp | 6 +-
src/replica/replica_2pc.cpp | 13 +-
src/replica/replica_stub.cpp | 7 -
src/replica/replica_stub.h | 3 -
src/server/capacity_unit_calculator.cpp | 43 ++---
src/server/capacity_unit_calculator.h | 2 -
src/server/pegasus_server_impl.cpp | 7 +-
src/server/pegasus_server_impl.h | 17 +-
src/server/pegasus_server_impl_init.cpp | 199 ++++++++++------------
src/server/range_read_limiter.h | 1 -
src/server/test/capacity_unit_calculator_test.cpp | 8 +-
src/test/bench_test/benchmark.cpp | 33 ++--
src/test/bench_test/config.cpp | 8 -
src/test/bench_test/config.h | 4 -
src/test/bench_test/config.ini | 4 +-
23 files changed, 205 insertions(+), 227 deletions(-)
diff --git a/src/block_service/test/fds_service_test.cpp
b/src/block_service/test/fds_service_test.cpp
index 6bd1fa081..d45bc154e 100644
--- a/src/block_service/test/fds_service_test.cpp
+++ b/src/block_service/test/fds_service_test.cpp
@@ -36,6 +36,9 @@
using namespace dsn;
using namespace dsn::dist::block_service;
+DSN_DEFINE_uint64(fds_concurrent_test, min_size, 64, "");
+DSN_DEFINE_uint64(fds_concurrent_test, max_size, 64, "");
+
static std::string example_server_address = "<server-address>";
// please modify the the paras below to enable fds_service_test, default
fds_service_test will be
// skipped and return true
@@ -675,8 +678,6 @@ TEST_F(FDSClientTest, test_concurrent_upload_download)
DSN_DEFINE_int32(fds_concurrent_test, total_files, 64, "");
int total_files = FLAGS_total_files;
- unsigned long min_size =
dsn_config_get_value_uint64("fds_concurrent_test", "min_size", 64, "");
- unsigned long max_size =
dsn_config_get_value_uint64("fds_concurrent_test", "min_size", 64, "");
std::vector<std::string> filenames;
filenames.reserve(total_files);
@@ -688,7 +689,7 @@ TEST_F(FDSClientTest, test_concurrent_upload_download)
for (int i = 0; i < total_files; ++i) {
char index[64];
snprintf(index, 64, "%04d", i);
- unsigned long random_size = rand::next_u64(min_size, max_size);
+ unsigned long random_size = rand::next_u64(FLAGS_min_size,
FLAGS_max_size);
std::string filename = "randomfile" + std::string(index);
filenames.push_back(filename);
filesize.push_back(random_size);
diff --git a/src/failure_detector/test/failure_detector.cpp
b/src/failure_detector/test/failure_detector.cpp
index 19dded7ea..262be6339 100644
--- a/src/failure_detector/test/failure_detector.cpp
+++ b/src/failure_detector/test/failure_detector.cpp
@@ -43,12 +43,15 @@
#include "runtime/service_app.h"
#include "runtime/rpc/rpc_address.h"
#include <vector>
+#include "utils/flags.h"
DSN_DECLARE_int32(max_succssive_unstable_restart);
using namespace dsn;
using namespace dsn::fd;
+DSN_DECLARE_uint64(stable_rs_min_running_seconds);
+
#define MPORT_START 30001
#define WPORT 40001
#define MCOUNT 3
@@ -204,7 +207,7 @@ public:
error_code start(const std::vector<std::string> &args) override
{
- _opts.stable_rs_min_running_seconds = 10;
+ FLAGS_stable_rs_min_running_seconds = 10;
FLAGS_max_succssive_unstable_restart = 10;
_master_fd = new master_fd_test();
@@ -620,7 +623,7 @@ TEST(fd, update_stability)
fd->toggle_response_ping(true);
replication::fd_suboptions opts;
- opts.stable_rs_min_running_seconds = 5;
+ FLAGS_stable_rs_min_running_seconds = 5;
FLAGS_max_succssive_unstable_restart = 2;
fd->set_options(&opts);
diff --git a/src/meta/meta_options.cpp b/src/meta/meta_options.cpp
index e778a346f..814ee6616 100644
--- a/src/meta/meta_options.cpp
+++ b/src/meta/meta_options.cpp
@@ -67,13 +67,6 @@ void meta_options::initialize()
"meta_state_service provider parameters");
utils::split_args(meta_state_service_parameters, meta_state_service_args);
- node_live_percentage_threshold_for_update = dsn_config_get_value_uint64(
- "meta_server",
- "node_live_percentage_threshold_for_update",
- 65,
- "if live_node_count * 100 < total_node_count *
node_live_percentage_threshold_for_update, "
- "then freeze the cluster; default is 65");
-
meta_function_level_on_start = meta_function_level::fl_invalid;
const char *level_str = dsn_config_get_value_string(
"meta_server", "meta_function_level_on_start", "steady", "meta
function level on start");
@@ -113,11 +106,6 @@ void meta_options::initialize()
"",
"distributed_lock_service provider
parameters");
utils::split_args(distributed_lock_service_parameters,
_fd_opts.distributed_lock_service_args);
- _fd_opts.stable_rs_min_running_seconds =
- dsn_config_get_value_uint64("meta_server",
- "stable_rs_min_running_seconds",
- 600,
- "min running seconds for a stable replica
server");
/// load balancer options
_lb_opts.server_load_balancer_type =
@@ -125,11 +113,6 @@ void meta_options::initialize()
"server_load_balancer_type",
"greedy_load_balancer",
"server load balancer provider");
- _lb_opts.replica_assign_delay_ms_for_dropouts =
- dsn_config_get_value_uint64("meta_server",
- "replica_assign_delay_ms_for_dropouts",
- 300000,
- "replica_assign_delay_ms_for_dropouts,
default is 300000");
_lb_opts.balancer_in_turn = dsn_config_get_value_bool(
"meta_server", "balancer_in_turn", false, "balance the apps
one-by-one/concurrently");
_lb_opts.only_primary_balancer = dsn_config_get_value_bool(
diff --git a/src/meta/meta_options.h b/src/meta/meta_options.h
index a227ecf83..fa1362592 100644
--- a/src/meta/meta_options.h
+++ b/src/meta/meta_options.h
@@ -59,15 +59,12 @@ class fd_suboptions
public:
std::string distributed_lock_service_type;
std::vector<std::string> distributed_lock_service_args;
-
- uint64_t stable_rs_min_running_seconds;
};
class lb_suboptions
{
public:
std::string server_load_balancer_type;
- uint64_t replica_assign_delay_ms_for_dropouts;
bool balancer_in_turn;
bool only_primary_balancer;
@@ -81,7 +78,6 @@ public:
std::string meta_state_service_type;
std::vector<std::string> meta_state_service_args;
- uint64_t node_live_percentage_threshold_for_update;
meta_function_level::type meta_function_level_on_start;
bool recover_from_replica_server;
diff --git a/src/meta/meta_server_failure_detector.cpp
b/src/meta/meta_server_failure_detector.cpp
index 3fec4a440..919dbec12 100644
--- a/src/meta/meta_server_failure_detector.cpp
+++ b/src/meta/meta_server_failure_detector.cpp
@@ -42,6 +42,10 @@ DSN_DEFINE_int32(meta_server,
5,
                  "meta server will treat a rs unstable so as to reject its
                  beacons if "
"its successively restarting count exceeds this value.");
+DSN_DEFINE_uint64(meta_server,
+ stable_rs_min_running_seconds,
+ 600,
+ "The minimal running seconds for a stable replica server");
namespace dsn {
namespace replication {
@@ -216,8 +220,7 @@ bool
meta_server_failure_detector::update_stability_stat(const fd::beacon_msg &b
if (beacon.start_time == w.last_start_time_ms) {
LOG_DEBUG(
"{} isn't restarted, last_start_time({})", beacon.from_addr,
w.last_start_time_ms);
- if (dsn_now_ms() - w.last_start_time_ms >=
- _fd_opts->stable_rs_min_running_seconds * 1000 &&
+ if (dsn_now_ms() - w.last_start_time_ms >=
FLAGS_stable_rs_min_running_seconds * 1000 &&
w.unstable_restart_count > 0) {
LOG_INFO("{} has stably run for a while, reset it's unstable
count({}) to 0",
beacon.from_addr,
@@ -230,7 +233,7 @@ bool
meta_server_failure_detector::update_stability_stat(const fd::beacon_msg &b
w.last_start_time_ms,
beacon.start_time);
if (beacon.start_time - w.last_start_time_ms <
- _fd_opts->stable_rs_min_running_seconds * 1000) {
+ FLAGS_stable_rs_min_running_seconds * 1000) {
w.unstable_restart_count++;
LOG_WARNING("{} encounter an unstable restart,
total_count({})",
beacon.from_addr,
diff --git a/src/meta/meta_service.cpp b/src/meta/meta_service.cpp
index 11d6a4a77..2b28b8615 100644
--- a/src/meta/meta_service.cpp
+++ b/src/meta/meta_service.cpp
@@ -62,6 +62,11 @@ DSN_DEFINE_int32(replication,
lb_interval_ms,
10000,
"every this period(ms) the meta server will do load balance");
+DSN_DEFINE_uint64(meta_server,
+ node_live_percentage_threshold_for_update,
+ 65,
+ "If live_node_count * 100 < total_node_count * "
+ "node_live_percentage_threshold_for_update, then freeze the
cluster.");
DSN_DECLARE_int32(fd_beacon_interval_seconds);
DSN_DECLARE_int32(fd_check_interval_seconds);
@@ -73,8 +78,7 @@ meta_service::meta_service()
{
_opts.initialize();
_meta_opts.initialize();
- _node_live_percentage_threshold_for_update =
- _meta_opts.node_live_percentage_threshold_for_update;
+ _node_live_percentage_threshold_for_update =
FLAGS_node_live_percentage_threshold_for_update;
_state.reset(new server_state());
_function_level.store(_meta_opts.meta_function_level_on_start);
if (_meta_opts.recover_from_replica_server) {
@@ -233,7 +237,7 @@ void meta_service::register_ctrl_commands()
} else {
if (args[0] == "DEFAULT") {
_node_live_percentage_threshold_for_update =
-
_meta_opts.node_live_percentage_threshold_for_update;
+ FLAGS_node_live_percentage_threshold_for_update;
} else {
int32_t v = 0;
if (!dsn::buf2int32(args[0], v) || v < 0) {
diff --git a/src/meta/partition_guardian.cpp b/src/meta/partition_guardian.cpp
index 270994433..e636a41ff 100644
--- a/src/meta/partition_guardian.cpp
+++ b/src/meta/partition_guardian.cpp
@@ -25,12 +25,15 @@ namespace dsn {
namespace replication {
DSN_DEFINE_int32(meta_server, max_replicas_in_group, 4, "max replicas(alive &
dead) in a group");
+DSN_DEFINE_uint64(meta_server,
+ replica_assign_delay_ms_for_dropouts,
+ 300000,
+                  "The delay in milliseconds before assigning replicas for dropouts");
partition_guardian::partition_guardian(meta_service *svc) : _svc(svc)
{
if (svc != nullptr) {
- _replica_assign_delay_ms_for_dropouts =
-
svc->get_meta_options()._lb_opts.replica_assign_delay_ms_for_dropouts;
+ _replica_assign_delay_ms_for_dropouts =
FLAGS_replica_assign_delay_ms_for_dropouts;
config_context::MAX_REPLICA_COUNT_IN_GRROUP =
FLAGS_max_replicas_in_group;
} else {
_replica_assign_delay_ms_for_dropouts = 0;
@@ -483,7 +486,7 @@ pc_status
partition_guardian::on_missing_secondary(meta_view &view, const dsn::g
_svc->get_options().app_mutation_2pc_min_replica_count(pc.max_replica_count)) {
// ATTENTION:
// when max_replica_count == 2, even if there is only 1 replica alive
now, we will still
- // wait for replica_assign_delay_ms_for_dropouts before recover the
second replica.
+ // wait for '_replica_assign_delay_ms_for_dropouts' before recover the
second replica.
is_emergency = true;
LOG_INFO("gpid({}): is emergency due to too few replicas", gpid);
} else if (cc.dropped.empty()) {
@@ -669,6 +672,7 @@ void partition_guardian::finish_cure_proposal(meta_view
&view,
void partition_guardian::register_ctrl_commands()
{
+ // TODO(yingchun): update _replica_assign_delay_ms_for_dropouts by http
_cmds.emplace_back(dsn::command_manager::instance().register_command(
{"meta.lb.assign_delay_ms"},
"lb.assign_delay_ms [num | DEFAULT]",
@@ -691,8 +695,7 @@ std::string partition_guardian::ctrl_assign_delay_ms(const
std::vector<std::stri
result = std::to_string(_replica_assign_delay_ms_for_dropouts);
} else {
if (args[0] == "DEFAULT") {
- _replica_assign_delay_ms_for_dropouts =
-
_svc->get_meta_options()._lb_opts.replica_assign_delay_ms_for_dropouts;
+ _replica_assign_delay_ms_for_dropouts =
FLAGS_replica_assign_delay_ms_for_dropouts;
} else {
int32_t v = 0;
if (!dsn::buf2int32(args[0], v) || v <= 0) {
diff --git a/src/meta/test/meta_app_operation_test.cpp
b/src/meta/test/meta_app_operation_test.cpp
index 9bdeba6b4..7b2c0c794 100644
--- a/src/meta/test/meta_app_operation_test.cpp
+++ b/src/meta/test/meta_app_operation_test.cpp
@@ -32,9 +32,9 @@
namespace dsn {
namespace replication {
-DSN_DECLARE_uint64(min_live_node_count_for_unfreeze);
-DSN_DECLARE_int32(min_allowed_replica_count);
DSN_DECLARE_int32(max_allowed_replica_count);
+DSN_DECLARE_int32(min_allowed_replica_count);
+DSN_DECLARE_uint64(min_live_node_count_for_unfreeze);
class meta_app_operation_test : public meta_test_base
{
@@ -394,7 +394,7 @@ TEST_F(meta_app_operation_test, create_app)
std::vector<rpc_address> nodes =
ensure_enough_alive_nodes(total_node_count);
// the meta function level will become freezed once
- // alive_nodes * 100 < total_nodes *
node_live_percentage_threshold_for_update
+ // alive_nodes * 100 < total_nodes *
_node_live_percentage_threshold_for_update
// even if alive_nodes >= min_live_node_count_for_unfreeze
set_node_live_percentage_threshold_for_update(0);
@@ -726,9 +726,9 @@ TEST_F(meta_app_operation_test, set_max_replica_count)
<< ", max_allowed_replica_count=" <<
test.max_allowed_replica_count
<< ", expected_err=" << test.expected_err << std::endl;
- // disable node_live_percentage_threshold_for_update
+ // disable _node_live_percentage_threshold_for_update
// for the reason that the meta function level will become freezed once
- // alive_nodes * 100 < total_nodes *
node_live_percentage_threshold_for_update
+ // alive_nodes * 100 < total_nodes *
_node_live_percentage_threshold_for_update
// even if alive_nodes >= min_live_node_count_for_unfreeze
set_node_live_percentage_threshold_for_update(0);
diff --git a/src/meta/test/update_configuration_test.cpp
b/src/meta/test/update_configuration_test.cpp
index 40cd2e225..6b463675a 100644
--- a/src/meta/test/update_configuration_test.cpp
+++ b/src/meta/test/update_configuration_test.cpp
@@ -54,6 +54,8 @@ namespace dsn {
namespace replication {
DSN_DECLARE_uint64(min_live_node_count_for_unfreeze);
+DSN_DECLARE_uint64(node_live_percentage_threshold_for_update);
+DSN_DECLARE_uint64(replica_assign_delay_ms_for_dropouts);
class fake_sender_meta_service : public dsn::replication::meta_service
{
@@ -267,7 +269,7 @@ void meta_service_test_app::update_configuration_test()
};
    // the default delay for adding a node is 5 minutes
ASSERT_FALSE(wait_state(ss, validator3, 10));
- svc->_meta_opts._lb_opts.replica_assign_delay_ms_for_dropouts = 0;
+ FLAGS_replica_assign_delay_ms_for_dropouts = 0;
svc->_partition_guardian.reset(new partition_guardian(svc.get()));
svc->_balancer.reset(new dummy_balancer(svc.get()));
ASSERT_TRUE(wait_state(ss, validator3, 10));
@@ -452,7 +454,7 @@ void meta_service_test_app::cannot_run_balancer_test()
// set FLAGS_min_live_node_count_for_unfreeze directly to bypass its flag
validator
FLAGS_min_live_node_count_for_unfreeze = 0;
- svc->_meta_opts.node_live_percentage_threshold_for_update = 0;
+ FLAGS_node_live_percentage_threshold_for_update = 0;
svc->_state->initialize(svc.get(), "/");
svc->_failure_detector.reset(new meta_server_failure_detector(svc.get()));
diff --git a/src/replica/replica_2pc.cpp b/src/replica/replica_2pc.cpp
index 3d97fd400..b73c75ab9 100644
--- a/src/replica/replica_2pc.cpp
+++ b/src/replica/replica_2pc.cpp
@@ -64,6 +64,11 @@ DSN_DEFINE_int32(replication,
log_shared_pending_size_throttling_delay_ms,
0,
"log_shared_pending_size_throttling_delay_ms");
+DSN_DEFINE_uint64(
+ replication,
+ max_allowed_write_size,
+ 1 << 20,
    "Write operations exceeding this threshold will be logged and rejected, 0 means
    no check");
DSN_DECLARE_int32(max_mutation_count_in_prepare_list);
DSN_DECLARE_int32(staleness_for_commit);
@@ -89,17 +94,17 @@ void replica::on_client_write(dsn::message_ex *request,
bool ignore_throttling)
return;
}
- if (dsn_unlikely(_stub->_max_allowed_write_size &&
- request->body_size() > _stub->_max_allowed_write_size)) {
+ if (dsn_unlikely(FLAGS_max_allowed_write_size &&
+ request->body_size() > FLAGS_max_allowed_write_size)) {
std::string request_info = _app->dump_write_request(request);
LOG_WARNING_PREFIX(
"client from {} write request body size exceed threshold, request
= [{}], "
"request_body_size "
- "= {}, max_allowed_write_size = {}, it will be rejected!",
+ "= {}, FLAGS_max_allowed_write_size = {}, it will be rejected!",
request->header->from_address.to_string(),
request_info,
request->body_size(),
- _stub->_max_allowed_write_size);
+ FLAGS_max_allowed_write_size);
_stub->_counter_recent_write_size_exceed_threshold_count->increment();
response_client_write(request, ERR_INVALID_DATA);
return;
diff --git a/src/replica/replica_stub.cpp b/src/replica/replica_stub.cpp
index 80300a987..4e7f6acf7 100644
--- a/src/replica/replica_stub.cpp
+++ b/src/replica/replica_stub.cpp
@@ -145,13 +145,6 @@ replica_stub::replica_stub(replica_state_subscriber
subscriber /*= nullptr*/,
_log = nullptr;
_primary_address_str[0] = '\0';
install_perf_counters();
-
- _max_allowed_write_size = dsn_config_get_value_uint64("replication",
-
"max_allowed_write_size",
- 1 << 20,
- "write operation
exceed this "
- "threshold will be
logged and reject, "
- "default is 1MB, 0
means no check");
}
replica_stub::~replica_stub(void) { close(); }
diff --git a/src/replica/replica_stub.h b/src/replica/replica_stub.h
index 041894edf..693ed6a27 100644
--- a/src/replica/replica_stub.h
+++ b/src/replica/replica_stub.h
@@ -398,9 +398,6 @@ private:
// nfs_node
std::unique_ptr<dsn::nfs_node> _nfs;
- // write body size exceed this threshold will be logged and reject, 0
means no check
- uint64_t _max_allowed_write_size;
-
// replica count executing bulk load downloading concurrently
std::atomic_int _bulk_load_downloading_count;
diff --git a/src/server/capacity_unit_calculator.cpp
b/src/server/capacity_unit_calculator.cpp
index 0bde5f961..1acb0c6d7 100644
--- a/src/server/capacity_unit_calculator.cpp
+++ b/src/server/capacity_unit_calculator.cpp
@@ -24,10 +24,25 @@
#include <rocksdb/status.h>
#include "hotkey_collector.h"
#include "utils/fmt_logging.h"
+#include "utils/flags.h"
namespace pegasus {
namespace server {
+DSN_DEFINE_uint64(pegasus.server,
+ perf_counter_read_capacity_unit_size,
+ 4 * 1024,
+ "capacity unit size of read requests, default 4KB");
+DSN_DEFINE_validator(perf_counter_read_capacity_unit_size,
+ [](const uint64_t value) -> bool { return
powerof2(value); });
+
+DSN_DEFINE_uint64(pegasus.server,
+ perf_counter_write_capacity_unit_size,
+ 4 * 1024,
+ "capacity unit size of write requests, default 4KB");
+DSN_DEFINE_validator(perf_counter_write_capacity_unit_size,
+ [](const uint64_t value) -> bool { return
powerof2(value); });
+
capacity_unit_calculator::capacity_unit_calculator(
replica_base *r,
std::shared_ptr<hotkey_collector> read_hotkey_collector,
@@ -42,22 +57,8 @@ capacity_unit_calculator::capacity_unit_calculator(
CHECK(_write_hotkey_collector, "write hotkey collector is a nullptr");
CHECK(_read_size_throttling_controller, "_read_size_throttling_controller
is a nullptr");
- _read_capacity_unit_size =
- dsn_config_get_value_uint64("pegasus.server",
- "perf_counter_read_capacity_unit_size",
- 4 * 1024,
- "capacity unit size of read requests,
default 4KB");
- _write_capacity_unit_size =
- dsn_config_get_value_uint64("pegasus.server",
- "perf_counter_write_capacity_unit_size",
- 4 * 1024,
- "capacity unit size of write requests,
default 4KB");
- CHECK(powerof2(_read_capacity_unit_size),
- "'perf_counter_read_capacity_unit_size' must be a power of 2");
- CHECK(powerof2(_write_capacity_unit_size),
- "'perf_counter_write_capacity_unit_size' must be a power of 2");
- _log_read_cu_size = log(_read_capacity_unit_size) / log(2);
- _log_write_cu_size = log(_write_capacity_unit_size) / log(2);
+ _log_read_cu_size = log(FLAGS_perf_counter_read_capacity_unit_size) /
log(2);
+ _log_write_cu_size = log(FLAGS_perf_counter_write_capacity_unit_size) /
log(2);
std::string str_gpid = r->get_gpid().to_string();
char name[256];
@@ -111,9 +112,10 @@ capacity_unit_calculator::capacity_unit_calculator(
int64_t capacity_unit_calculator::add_read_cu(int64_t read_data_size)
{
- int64_t read_cu = read_data_size > 0
- ? (read_data_size + _read_capacity_unit_size - 1) >>
_log_read_cu_size
- : 1;
+ int64_t read_cu =
+ read_data_size > 0
+ ? (read_data_size + FLAGS_perf_counter_read_capacity_unit_size -
1) >> _log_read_cu_size
+ : 1;
_pfc_recent_read_cu->add(read_cu);
_read_size_throttling_controller->consume_token(read_data_size);
return read_cu;
@@ -122,7 +124,8 @@ int64_t capacity_unit_calculator::add_read_cu(int64_t
read_data_size)
int64_t capacity_unit_calculator::add_write_cu(int64_t write_data_size)
{
int64_t write_cu = write_data_size > 0
- ? (write_data_size + _write_capacity_unit_size - 1)
>> _log_write_cu_size
+ ? (write_data_size +
FLAGS_perf_counter_write_capacity_unit_size - 1) >>
+ _log_write_cu_size
: 1;
_pfc_recent_write_cu->add(write_cu);
return write_cu;
diff --git a/src/server/capacity_unit_calculator.h
b/src/server/capacity_unit_calculator.h
index 404c2964a..868862306 100644
--- a/src/server/capacity_unit_calculator.h
+++ b/src/server/capacity_unit_calculator.h
@@ -95,8 +95,6 @@ protected:
#endif
private:
- uint64_t _read_capacity_unit_size;
- uint64_t _write_capacity_unit_size;
uint32_t _log_read_cu_size;
uint32_t _log_write_cu_size;
diff --git a/src/server/pegasus_server_impl.cpp
b/src/server/pegasus_server_impl.cpp
index 1013e4d71..4deafe27e 100644
--- a/src/server/pegasus_server_impl.cpp
+++ b/src/server/pegasus_server_impl.cpp
@@ -50,9 +50,12 @@ namespace pegasus {
namespace server {
DEFINE_TASK_CODE(LPC_PEGASUS_SERVER_DELAY, TASK_PRIORITY_COMMON,
::dsn::THREAD_POOL_DEFAULT)
+
DSN_DECLARE_int32(read_amp_bytes_per_bit);
DSN_DECLARE_uint32(checkpoint_reserve_min_count);
DSN_DECLARE_uint32(checkpoint_reserve_time_seconds);
+DSN_DECLARE_uint64(rocksdb_iteration_threshold_time_ms);
+DSN_DECLARE_uint64(rocksdb_slow_query_threshold_ns);
DSN_DEFINE_int32(pegasus.server,
hotkey_analyse_time_interval_s,
@@ -2700,7 +2703,7 @@ void pegasus_server_impl::update_throttling_controller(
void pegasus_server_impl::update_slow_query_threshold(
const std::map<std::string, std::string> &envs)
{
- uint64_t threshold_ns = _slow_query_threshold_ns_in_config;
+ uint64_t threshold_ns = FLAGS_rocksdb_slow_query_threshold_ns;
auto find = envs.find(ROCKSDB_ENV_SLOW_QUERY_THRESHOLD);
if (find != envs.end()) {
// get slow query from env(the unit of slow query from env is ms)
@@ -2725,7 +2728,7 @@ void pegasus_server_impl::update_slow_query_threshold(
void pegasus_server_impl::update_rocksdb_iteration_threshold(
const std::map<std::string, std::string> &envs)
{
- uint64_t threshold_ms =
_rng_rd_opts.rocksdb_iteration_threshold_time_ms_in_config;
+ uint64_t threshold_ms = FLAGS_rocksdb_iteration_threshold_time_ms;
auto find = envs.find(ROCKSDB_ITERATION_THRESHOLD_TIME_MS);
if (find != envs.end()) {
// the unit of iteration threshold from env is ms
diff --git a/src/server/pegasus_server_impl.h b/src/server/pegasus_server_impl.h
index 8e651b9a3..c7625201b 100644
--- a/src/server/pegasus_server_impl.h
+++ b/src/server/pegasus_server_impl.h
@@ -50,6 +50,9 @@ namespace server {
DSN_DECLARE_uint64(rocksdb_abnormal_batch_get_bytes_threshold);
DSN_DECLARE_uint64(rocksdb_abnormal_batch_get_count_threshold);
+DSN_DECLARE_uint64(rocksdb_abnormal_get_size_threshold);
+DSN_DECLARE_uint64(rocksdb_abnormal_multi_get_iterate_count_threshold);
+DSN_DECLARE_uint64(rocksdb_abnormal_multi_get_size_threshold);
class meta_store;
class capacity_unit_calculator;
@@ -349,11 +352,12 @@ private:
bool is_multi_get_abnormal(uint64_t time_used, uint64_t size, uint64_t
iterate_count)
{
- if (_abnormal_multi_get_size_threshold && size >=
_abnormal_multi_get_size_threshold) {
+ if (FLAGS_rocksdb_abnormal_multi_get_size_threshold > 0 &&
+ size >= FLAGS_rocksdb_abnormal_multi_get_size_threshold) {
return true;
}
- if (_abnormal_multi_get_iterate_count_threshold &&
- iterate_count >= _abnormal_multi_get_iterate_count_threshold) {
+ if (FLAGS_rocksdb_abnormal_multi_get_iterate_count_threshold > 0 &&
+ iterate_count >=
FLAGS_rocksdb_abnormal_multi_get_iterate_count_threshold) {
return true;
}
if (time_used >= _slow_query_threshold_ns) {
@@ -382,7 +386,8 @@ private:
bool is_get_abnormal(uint64_t time_used, uint64_t value_size)
{
- if (_abnormal_get_size_threshold && value_size >=
_abnormal_get_size_threshold) {
+ if (FLAGS_rocksdb_abnormal_get_size_threshold > 0 &&
+ value_size >= FLAGS_rocksdb_abnormal_get_size_threshold) {
return true;
}
if (time_used >= _slow_query_threshold_ns) {
@@ -416,12 +421,8 @@ private:
dsn::gpid _gpid;
std::string _primary_address;
bool _verbose_log;
- uint64_t _abnormal_get_size_threshold;
- uint64_t _abnormal_multi_get_size_threshold;
- uint64_t _abnormal_multi_get_iterate_count_threshold;
// slow query time threshold. exceed this threshold will be logged.
uint64_t _slow_query_threshold_ns;
- uint64_t _slow_query_threshold_ns_in_config;
range_read_limiter_options _rng_rd_opts;
diff --git a/src/server/pegasus_server_impl_init.cpp
b/src/server/pegasus_server_impl_init.cpp
index edbb49c7c..4e38f1621 100644
--- a/src/server/pegasus_server_impl_init.cpp
+++ b/src/server/pegasus_server_impl_init.cpp
@@ -199,6 +199,79 @@ DSN_DEFINE_int32(pegasus.server,
-1,
"The number of opened files that can be used by a
replica(namely a DB instance). "
"The default value is -1 which means always keep files
opened.");
+DSN_DEFINE_uint64(pegasus.server,
+ rocksdb_slow_query_threshold_ns,
+ 100000000,
                  "get/multi-get operations whose duration exceeds this threshold will
                  be logged");
+DSN_DEFINE_validator(rocksdb_slow_query_threshold_ns,
+ [](uint64_t value) -> bool { return value > 0; });
+DSN_DEFINE_uint64(
+ pegasus.server,
+ rocksdb_abnormal_get_size_threshold,
+ 1000000,
+ "get operation value size exceed this threshold will be logged, 0 means no
check");
+DSN_DEFINE_uint64(pegasus.server,
+ rocksdb_abnormal_multi_get_size_threshold,
+ 10000000,
+ "multi-get operation total key-value size exceed this
threshold will be logged, "
+ "0 means no check");
+DSN_DEFINE_uint64(pegasus.server,
+ rocksdb_abnormal_multi_get_iterate_count_threshold,
+ 1000,
+ "multi-get operation iterate count exceed this threshold
will be logged, 0 means "
+ "no check");
+DSN_DEFINE_uint64(pegasus.server,
+ rocksdb_multi_get_max_iteration_size,
+ 30 << 20,
+ "multi-get operation total key-value size exceed this
threshold will stop "
+ "iterating rocksdb, 0 means no check");
+DSN_DEFINE_uint64(pegasus.server,
+ rocksdb_iteration_threshold_time_ms,
+ 30000,
+ "max duration for handling one pegasus scan
request(sortkey_count/multiget/scan) "
+ "if exceed this threshold, iterator will be stopped, 0 means
no check");
+DSN_DEFINE_uint64(pegasus.server,
+ rocksdb_compaction_readahead_size,
+ 2 * 1024 * 1024,
+ "rocksdb options.compaction_readahead_size");
+DSN_DEFINE_uint64(pegasus.server,
+ rocksdb_writable_file_max_buffer_size,
+ 1024 * 1024,
+ "rocksdb options.writable_file_max_buffer_size");
+DSN_DEFINE_uint64(pegasus.server,
+ rocksdb_write_buffer_size,
+ 64 * 1024 * 1024,
+ "rocksdb options.write_buffer_size");
+DSN_DEFINE_uint64(pegasus.server,
+ rocksdb_target_file_size_base,
+ 64 * 1024 * 1024,
+ "rocksdb options.target_file_size_base");
+DSN_DEFINE_uint64(pegasus.server,
+ rocksdb_max_bytes_for_level_base,
+ 10 * 64 * 1024 * 1024,
+ "rocksdb options.max_bytes_for_level_base");
+DSN_DEFINE_uint64(pegasus.server,
+ rocksdb_block_cache_capacity,
+ 10 * 1024 * 1024 * 1024ULL,
+ "block cache capacity for one pegasus server, shared by all
rocksdb instances");
+DSN_DEFINE_uint64(pegasus.server,
+ rocksdb_total_size_across_write_buffer,
+ 0,
+ "total size limit used by memtables across multiple
replicas");
+DSN_DEFINE_uint64(pegasus.server,
+ rocksdb_metadata_block_size,
+ 4096,
+ "Block size for partitioned metadata. Currently applied to
indexes when "
+ "two_level_index_search is used and to filters when
partition_filters is used. "
+ "Note: Since in the current implementation the filters and
index partitions "
+ "are aligned, an index/filter block is created when either
index or filter "
+ "block size reaches the specified limit. "
+ "Note: this limit is currently applied to only index blocks;
a filter "
+ "partition is cut right after an index block is cut");
+DSN_DEFINE_uint64(pegasus.server,
+ rocksdb_periodic_compaction_seconds,
+ 0,
+ "periodic_compaction_seconds, 0 means no periodic
compaction");
static const std::unordered_map<std::string,
rocksdb::BlockBasedTableOptions::IndexType>
INDEX_TYPE_STRING_MAP = {
@@ -236,49 +309,11 @@
pegasus_server_impl::pegasus_server_impl(dsn::replication::replica *r)
"rocksdb_verbose_log",
false,
"whether to print verbose log for
debugging");
- _slow_query_threshold_ns_in_config = dsn_config_get_value_uint64(
- "pegasus.server",
- "rocksdb_slow_query_threshold_ns",
- 100000000,
- "get/multi-get operation duration exceed this threshold will be
logged");
- _slow_query_threshold_ns = _slow_query_threshold_ns_in_config;
- CHECK_GT(_slow_query_threshold_ns, 0);
- _abnormal_get_size_threshold = dsn_config_get_value_uint64(
- "pegasus.server",
- "rocksdb_abnormal_get_size_threshold",
- 1000000,
- "get operation value size exceed this threshold will be logged, 0
means no check");
- _abnormal_multi_get_size_threshold =
- dsn_config_get_value_uint64("pegasus.server",
-
"rocksdb_abnormal_multi_get_size_threshold",
- 10000000,
- "multi-get operation total key-value size
exceed this "
- "threshold will be logged, 0 means no
check");
- _abnormal_multi_get_iterate_count_threshold = dsn_config_get_value_uint64(
- "pegasus.server",
- "rocksdb_abnormal_multi_get_iterate_count_threshold",
- 1000,
- "multi-get operation iterate count exceed this threshold will be
logged, 0 means no check");
-
+ _slow_query_threshold_ns = FLAGS_rocksdb_slow_query_threshold_ns;
_rng_rd_opts.multi_get_max_iteration_count =
FLAGS_rocksdb_multi_get_max_iteration_count;
-
- _rng_rd_opts.multi_get_max_iteration_size =
- dsn_config_get_value_uint64("pegasus.server",
- "rocksdb_multi_get_max_iteration_size",
- 30 << 20,
- "multi-get operation total key-value size
exceed "
- "this threshold will stop iterating
rocksdb, 0 means no check");
-
+ _rng_rd_opts.multi_get_max_iteration_size =
FLAGS_rocksdb_multi_get_max_iteration_size;
_rng_rd_opts.rocksdb_max_iteration_count =
FLAGS_rocksdb_max_iteration_count;
-
- _rng_rd_opts.rocksdb_iteration_threshold_time_ms_in_config =
dsn_config_get_value_uint64(
- "pegasus.server",
- "rocksdb_iteration_threshold_time_ms",
- 30000,
- "max duration for handling one pegasus scan
request(sortkey_count/multiget/scan) if exceed "
- "this threshold, iterator will be stopped, 0 means no check");
- _rng_rd_opts.rocksdb_iteration_threshold_time_ms =
- _rng_rd_opts.rocksdb_iteration_threshold_time_ms_in_config;
+ _rng_rd_opts.rocksdb_iteration_threshold_time_ms =
FLAGS_rocksdb_iteration_threshold_time_ms;
// init rocksdb::DBOptions
_db_opts.create_if_missing = true;
@@ -294,54 +329,23 @@
pegasus_server_impl::pegasus_server_impl(dsn::replication::replica *r)
"rocksdb_use_direct_io_for_flush_and_compaction",
false,
"rocksdb
options.use_direct_io_for_flush_and_compaction");
-
- // TODO(yingchun): size_t, uint64_t
- _db_opts.compaction_readahead_size =
- dsn_config_get_value_uint64("pegasus.server",
- "rocksdb_compaction_readahead_size",
- 2 * 1024 * 1024,
- "rocksdb
options.compaction_readahead_size");
- // TODO(yingchun): size_t, uint64_t
- _db_opts.writable_file_max_buffer_size =
- dsn_config_get_value_uint64("pegasus.server",
- "rocksdb_writable_file_max_buffer_size",
- 1024 * 1024,
- "rocksdb
options.writable_file_max_buffer_size");
+ _db_opts.compaction_readahead_size =
FLAGS_rocksdb_compaction_readahead_size;
+ _db_opts.writable_file_max_buffer_size =
FLAGS_rocksdb_writable_file_max_buffer_size;
_statistics = rocksdb::CreateDBStatistics();
_statistics->set_stats_level(rocksdb::kExceptDetailedTimers);
_db_opts.statistics = _statistics;
_db_opts.listeners.emplace_back(new pegasus_event_listener(this));
-
_db_opts.max_background_flushes = FLAGS_rocksdb_max_background_flushes;
_db_opts.max_background_compactions =
FLAGS_rocksdb_max_background_compactions;
-
// init rocksdb::ColumnFamilyOptions for data column family
- // TODO(yingchun): size_t, uint64_t
- _data_cf_opts.write_buffer_size =
- (size_t)dsn_config_get_value_uint64("pegasus.server",
- "rocksdb_write_buffer_size",
- 64 * 1024 * 1024,
- "rocksdb
options.write_buffer_size");
-
+ _data_cf_opts.write_buffer_size = FLAGS_rocksdb_write_buffer_size;
_data_cf_opts.max_write_buffer_number =
FLAGS_rocksdb_max_write_buffer_number;
_data_cf_opts.num_levels = FLAGS_rocksdb_num_levels;
- // TODO(yingchun): size_t, uint64_t
- _data_cf_opts.target_file_size_base =
- dsn_config_get_value_uint64("pegasus.server",
- "rocksdb_target_file_size_base",
- 64 * 1024 * 1024,
- "rocksdb options.target_file_size_base");
-
+ _data_cf_opts.target_file_size_base = FLAGS_rocksdb_target_file_size_base;
_data_cf_opts.target_file_size_multiplier =
FLAGS_rocksdb_target_file_size_multiplier;
- // TODO(yingchun): size_t, uint64_t
- _data_cf_opts.max_bytes_for_level_base =
- dsn_config_get_value_uint64("pegasus.server",
- "rocksdb_max_bytes_for_level_base",
- 10 * 64 * 1024 * 1024,
- "rocksdb
options.max_bytes_for_level_base");
-
+ _data_cf_opts.max_bytes_for_level_base =
FLAGS_rocksdb_max_bytes_for_level_base;
_data_cf_opts.max_bytes_for_level_multiplier =
dsn_config_get_value_double("pegasus.server",
"rocksdb_max_bytes_for_level_multiplier",
@@ -388,15 +392,9 @@
pegasus_server_impl::pegasus_server_impl(dsn::replication::replica *r)
// algorithm used by the block cache object can be more efficient in
this way.
static std::once_flag flag;
std::call_once(flag, [&]() {
- uint64_t capacity = dsn_config_get_value_uint64(
- "pegasus.server",
- "rocksdb_block_cache_capacity",
- 10 * 1024 * 1024 * 1024ULL,
- "block cache capacity for one pegasus server, shared by all
rocksdb instances");
-
// init block cache
- _s_block_cache =
- rocksdb::NewLRUCache(capacity,
FLAGS_rocksdb_block_cache_num_shard_bits);
+ _s_block_cache =
rocksdb::NewLRUCache(FLAGS_rocksdb_block_cache_capacity,
+
FLAGS_rocksdb_block_cache_num_shard_bits);
});
// every replica has the same block cache
@@ -432,20 +430,16 @@
pegasus_server_impl::pegasus_server_impl(dsn::replication::replica *r)
// thus the same block cache object. It's convenient to control the
total memory
// of memtables and block caches used by this server.
//
- // While write buffer manager is enabled,
total_size_across_write_buffer = 0
+ // While write buffer manager is enabled,
FLAGS_rocksdb_total_size_across_write_buffer = 0
// indicates no limit on memory, for details see:
//
https://github.com/facebook/rocksdb/blob/v6.6.4/include/rocksdb/write_buffer_manager.h#L23-24
static std::once_flag flag;
std::call_once(flag, [&]() {
- uint64_t total_size_across_write_buffer =
dsn_config_get_value_uint64(
- "pegasus.server",
- "rocksdb_total_size_across_write_buffer",
- 0,
- "total size limit used by memtables across multiple replicas");
LOG_INFO_PREFIX("rocksdb_total_size_across_write_buffer = {}",
- total_size_across_write_buffer);
+ FLAGS_rocksdb_total_size_across_write_buffer);
_s_write_buffer_manager =
std::make_shared<rocksdb::WriteBufferManager>(
- static_cast<size_t>(total_size_across_write_buffer),
tbl_opts.block_cache);
+
static_cast<size_t>(FLAGS_rocksdb_total_size_across_write_buffer),
+ tbl_opts.block_cache);
});
_db_opts.write_buffer_manager = _s_write_buffer_manager;
}
@@ -483,17 +477,7 @@
pegasus_server_impl::pegasus_server_impl(dsn::replication::replica *r)
"incompatibile with block-based filters.");
LOG_INFO_PREFIX("rocksdb_partition_filters = {}",
tbl_opts.partition_filters);
- tbl_opts.metadata_block_size = dsn_config_get_value_uint64(
- "pegasus.server",
- "rocksdb_metadata_block_size",
- 4096,
- "Block size for partitioned metadata. Currently applied to indexes
when "
- "two_level_index_search is used and to filters when partition_filters
is used. "
- "Note: Since in the current implementation the filters and index
partitions "
- "are aligned, an index/filter block is created when either index or
filter "
- "block size reaches the specified limit. "
- "Note: this limit is currently applied to only index blocks; a filter "
- "partition is cut right after an index block is cut");
+ tbl_opts.metadata_block_size = FLAGS_rocksdb_metadata_block_size;
LOG_INFO_PREFIX("rocksdb_metadata_block_size = {}",
tbl_opts.metadata_block_size);
tbl_opts.cache_index_and_filter_blocks = dsn_config_get_value_bool(
@@ -584,12 +568,7 @@
pegasus_server_impl::pegasus_server_impl(dsn::replication::replica *r)
_key_ttl_compaction_filter_factory =
std::make_shared<KeyWithTTLCompactionFilterFactory>();
_data_cf_opts.compaction_filter_factory =
_key_ttl_compaction_filter_factory;
-
- _data_cf_opts.periodic_compaction_seconds =
- dsn_config_get_value_uint64("pegasus.server",
- "rocksdb_periodic_compaction_seconds",
- 0,
- "periodic_compaction_seconds, 0 means no
periodic compaction");
+ _data_cf_opts.periodic_compaction_seconds =
FLAGS_rocksdb_periodic_compaction_seconds;
_checkpoint_reserve_min_count = FLAGS_checkpoint_reserve_min_count;
_checkpoint_reserve_time_seconds = FLAGS_checkpoint_reserve_time_seconds;
diff --git a/src/server/range_read_limiter.h b/src/server/range_read_limiter.h
index 9c8014713..ef17412d5 100644
--- a/src/server/range_read_limiter.h
+++ b/src/server/range_read_limiter.h
@@ -31,7 +31,6 @@ struct range_read_limiter_options
uint32_t multi_get_max_iteration_count;
uint64_t multi_get_max_iteration_size;
uint32_t rocksdb_max_iteration_count;
- uint64_t rocksdb_iteration_threshold_time_ms_in_config;
uint64_t rocksdb_iteration_threshold_time_ms;
};
diff --git a/src/server/test/capacity_unit_calculator_test.cpp
b/src/server/test/capacity_unit_calculator_test.cpp
index ef94adb9f..4eb8df423 100644
--- a/src/server/test/capacity_unit_calculator_test.cpp
+++ b/src/server/test/capacity_unit_calculator_test.cpp
@@ -24,10 +24,14 @@
#include "utils/token_bucket_throttling_controller.h"
#include "pegasus_key_schema.h"
#include "server/hotkey_collector.h"
+#include "utils/flags.h"
namespace pegasus {
namespace server {
+DSN_DECLARE_uint64(perf_counter_read_capacity_unit_size);
+DSN_DECLARE_uint64(perf_counter_write_capacity_unit_size);
+
class mock_capacity_unit_calculator : public capacity_unit_calculator
{
public:
@@ -90,8 +94,8 @@ public:
void test_init()
{
- ASSERT_EQ(_cal->_read_capacity_unit_size, 4096);
- ASSERT_EQ(_cal->_write_capacity_unit_size, 4096);
+ ASSERT_EQ(FLAGS_perf_counter_read_capacity_unit_size, 4096);
+ ASSERT_EQ(FLAGS_perf_counter_write_capacity_unit_size, 4096);
ASSERT_EQ(_cal->_log_read_cu_size, 12);
ASSERT_EQ(_cal->_log_write_cu_size, 12);
diff --git a/src/test/bench_test/benchmark.cpp
b/src/test/bench_test/benchmark.cpp
index 6b7372b6d..843bd21c1 100644
--- a/src/test/bench_test/benchmark.cpp
+++ b/src/test/bench_test/benchmark.cpp
@@ -31,6 +31,15 @@
namespace pegasus {
namespace test {
+DSN_DEFINE_uint64(pegasus.benchmark,
+ benchmark_num,
+ 10000,
+ "Number of key/values to place in database");
+DSN_DEFINE_uint64(pegasus.benchmark,
+ benchmark_seed,
+ 1000,
+ "Seed base for random number generators. When 0 it is
deterministic");
+
DSN_DECLARE_int32(hashkey_size);
DSN_DECLARE_int32(pegasus_timeout_ms);
DSN_DECLARE_int32(sortkey_size);
@@ -76,8 +85,11 @@ void benchmark::run_benchmark(int thread_count,
operation_type op_type)
// create thread args for each thread, and run them
std::vector<std::shared_ptr<thread_arg>> args;
for (int i = 0; i < thread_count; i++) {
- args.push_back(
- std::make_shared<thread_arg>(i + config::instance().seed,
hist_stats, method, this));
+ args.push_back(std::make_shared<thread_arg>(
+ i + (FLAGS_benchmark_seed == 0 ? 1000 : FLAGS_benchmark_seed),
+ hist_stats,
+ method,
+ this));
config::instance().env->StartThread(thread_body, args[i].get());
}
@@ -110,7 +122,7 @@ void benchmark::write_random(thread_arg *thread)
// do write operation num times
uint64_t bytes = 0;
int count = 0;
- for (int i = 0; i < config::instance().num; i++) {
+ for (int i = 0; i < FLAGS_benchmark_num; i++) {
// generate hash key and sort key
std::string hashkey, sortkey, value;
generate_kv_pair(hashkey, sortkey, value);
@@ -144,7 +156,7 @@ void benchmark::read_random(thread_arg *thread)
{
uint64_t bytes = 0;
uint64_t found = 0;
- for (int i = 0; i < config::instance().num; i++) {
+ for (int i = 0; i < FLAGS_benchmark_num; i++) {
// generate hash key and sort key
// generate value for random to keep in peace with write
std::string hashkey, sortkey, value;
@@ -174,7 +186,7 @@ void benchmark::read_random(thread_arg *thread)
}
// count total read bytes and hit rate
- std::string msg = fmt::format("({} of {} found)", found,
config::instance().num);
+ std::string msg = fmt::format("({} of {} found)", found,
FLAGS_benchmark_num);
thread->stats.add_bytes(bytes);
thread->stats.add_message(msg);
}
@@ -182,7 +194,7 @@ void benchmark::read_random(thread_arg *thread)
void benchmark::delete_random(thread_arg *thread)
{
// do delete operation num times
- for (int i = 0; i < config::instance().num; i++) {
+ for (int i = 0; i < FLAGS_benchmark_num; i++) {
// generate hash key and sort key
// generate value for random to keep in peace with write
std::string hashkey, sortkey, value;
@@ -237,10 +249,11 @@ void benchmark::print_header()
fmt::print(stdout, "Hashkeys: {} bytes each\n", FLAGS_hashkey_size);
fmt::print(stdout, "Sortkeys: {} bytes each\n", FLAGS_sortkey_size);
fmt::print(stdout, "Values: {} bytes each\n", FLAGS_value_size);
- fmt::print(stdout, "Entries: {}\n", config_.num);
- fmt::print(stdout,
- "FileSize: {} MB (estimated)\n",
- ((FLAGS_hashkey_size + FLAGS_sortkey_size + FLAGS_value_size) *
config_.num) >> 20);
+ fmt::print(stdout, "Entries: {}\n", FLAGS_benchmark_num);
+ fmt::print(
+ stdout,
+ "FileSize: {} MB (estimated)\n",
+ ((FLAGS_hashkey_size + FLAGS_sortkey_size + FLAGS_value_size) *
FLAGS_benchmark_num) >> 20);
print_warnings();
fmt::print(stdout, "------------------------------------------------\n");
diff --git a/src/test/bench_test/config.cpp b/src/test/bench_test/config.cpp
index 16349f6d7..055e953c9 100644
--- a/src/test/bench_test/config.cpp
+++ b/src/test/bench_test/config.cpp
@@ -48,14 +48,6 @@ config::config()
"\tfillrandom_pegasus -- pegasus write N values in random key
order\n"
"\treadrandom_pegasus -- pegasus read N times in random order\n"
"\tdeleterandom_pegasus -- pegasus delete N keys in random
order\n");
- num = dsn_config_get_value_uint64(
- "pegasus.benchmark", "num", 10000, "Number of key/values to place in
database");
- seed = dsn_config_get_value_uint64(
- "pegasus.benchmark",
- "seed",
- 1000,
- "Seed base for random number generators. When 0 it is deterministic");
- seed = seed ? seed : 1000;
env = rocksdb::Env::Default();
}
} // namespace test
diff --git a/src/test/bench_test/config.h b/src/test/bench_test/config.h
index f6a5f5f1f..1b267dd18 100644
--- a/src/test/bench_test/config.h
+++ b/src/test/bench_test/config.h
@@ -32,10 +32,6 @@ struct config : public dsn::utils::singleton<config>
std::string pegasus_app_name;
// Comma-separated list of operations to run
std::string benchmarks;
- // Number of key/values to place in database
- uint64_t num;
- // Seed base for random number generators
- uint64_t seed;
// Default environment suitable for the current operating system
rocksdb::Env *env;
diff --git a/src/test/bench_test/config.ini b/src/test/bench_test/config.ini
index 4379cdaea..44a455fb6 100644
--- a/src/test/bench_test/config.ini
+++ b/src/test/bench_test/config.ini
@@ -75,10 +75,10 @@ pegasus_cluster_name = onebox
pegasus_app_name = @APP@
pegasus_timeout_ms = @TIMEOUT_MS@
benchmarks = @TYPE@
-num = @NUM@
+benchmark_num = @NUM@
threads = @THREAD@
value_size = @VALUE_SIZE@
hashkey_size = @HASHKEY_SIZE@
sortkey_size = @SORTKEY_SIZE@
-seed = @SEED@
+benchmark_seed = @SEED@
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]