levy5307 commented on a change in pull request #592:
URL: https://github.com/apache/incubator-pegasus/pull/592#discussion_r486749594
##########
File path: src/server/hotspot_partition_calculator.cpp
##########
@@ -34,67 +35,84 @@ DSN_DEFINE_int64("pegasus.collector",
"eliminate outdated historical "
"data");
-void hotspot_partition_calculator::data_aggregate(const std::vector<row_data>
&partitions)
+void hotspot_partition_calculator::data_aggregate(const std::vector<row_data>
&partition_stats)
{
while (_partition_stat_histories.size() > FLAGS_max_hotspot_store_size -
1) {
- _partition_stat_histories.pop();
+ _partition_stat_histories.pop_front();
}
- std::vector<hotspot_partition_data> temp(partitions.size());
- // TODO refactor the data structure
- for (int i = 0; i < partitions.size(); i++) {
- temp[i] = std::move(hotspot_partition_data(partitions[i]));
+ std::vector<hotspot_partition_data> temp;
+ for (const auto &partition_stat : partition_stats) {
+ temp.emplace_back(hotspot_partition_data(partition_stat));
}
- _partition_stat_histories.emplace(temp);
+ _partition_stat_histories.emplace_back(temp);
}
void hotspot_partition_calculator::init_perf_counter(int partition_count)
{
- std::string counter_name;
- std::string counter_desc;
+ std::string read_counter_name, write_counter_name;
+ std::string read_counter_desc, write_counter_desc;
for (int i = 0; i < partition_count; i++) {
- string partition_desc = _app_name + '.' + std::to_string(i);
- counter_name = fmt::format("app.stat.hotspots@{}", partition_desc);
- counter_desc = fmt::format("statistic the hotspots of app {}",
partition_desc);
- _hot_points[i].init_app_counter(
- "app.pegasus", counter_name.c_str(), COUNTER_TYPE_NUMBER,
counter_desc.c_str());
+ string read_partition_desc = _app_name + '.' + "read." +
std::to_string(i);
+ read_counter_name = fmt::format("app.stat.hotspots@{}",
read_partition_desc);
+ read_counter_desc = fmt::format("statistic the hotspots of app {}",
read_partition_desc);
+
_hot_points[i].emplace_back(std::make_unique<dsn::perf_counter_wrapper>());
+ _hot_points[i][READ_HOTSPOT_DATA]->init_app_counter("app.pegasus",
+
read_counter_name.c_str(),
+
COUNTER_TYPE_NUMBER,
+
read_counter_desc.c_str());
+ string write_partition_desc = _app_name + '.' + "write." +
std::to_string(i);
+ write_counter_name = fmt::format("app.stat.hotspots@{}",
write_partition_desc);
+ write_counter_desc = fmt::format("statistic the hotspots of app {}",
write_partition_desc);
+
_hot_points[i].emplace_back(std::make_unique<dsn::perf_counter_wrapper>());
+ _hot_points[i][WRITE_HOTSPOT_DATA]->init_app_counter("app.pegasus",
+
write_counter_name.c_str(),
+
COUNTER_TYPE_NUMBER,
+
write_counter_desc.c_str());
}
}
void hotspot_partition_calculator::data_analyse()
{
- dassert(_partition_stat_histories.back().size() == _hot_points.size(),
- "partition counts error, please check");
- std::vector<double> data_samples;
- data_samples.reserve(_partition_stat_histories.size() *
_hot_points.size());
- auto temp_data = _partition_stat_histories;
- double table_qps_sum = 0, standard_deviation = 0, table_qps_avg = 0;
- int sample_count = 0;
- while (!temp_data.empty()) {
- for (const auto &partition_data : temp_data.front()) {
- if (partition_data.total_qps - 1.00 > 0) {
- data_samples.push_back(partition_data.total_qps);
- table_qps_sum += partition_data.total_qps;
- sample_count++;
+ dcheck_eq(_partition_stat_histories.back().size(), _hot_points.size());
+ for (int data_type = 0; data_type <= 1; data_type++) {
+ // 0: READ_HOTSPOT_DATA; 1: WRITE_HOTSPOT_DATA
Review comment:
Prefer bounding the loop by the enum's declared count rather than the
hard-coded literal `1`, so the loop stays correct (and the explanatory
comment becomes unnecessary) if new hotspot data types are added later:
`for (int data_type = 0; data_type < HOTSPOT_DATA_TYPE_COUNT;
data_type++) `
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]