levy5307 commented on a change in pull request #592:
URL: https://github.com/apache/incubator-pegasus/pull/592#discussion_r486750639



##########
File path: src/server/hotspot_partition_calculator.cpp
##########
@@ -34,67 +35,84 @@ DSN_DEFINE_int64("pegasus.collector",
                  "eliminate outdated historical "
                  "data");
 
-void hotspot_partition_calculator::data_aggregate(const std::vector<row_data> 
&partitions)
+void hotspot_partition_calculator::data_aggregate(const std::vector<row_data> 
&partition_stats)
 {
     while (_partition_stat_histories.size() > FLAGS_max_hotspot_store_size - 
1) {
-        _partition_stat_histories.pop();
+        _partition_stat_histories.pop_front();
     }
-    std::vector<hotspot_partition_data> temp(partitions.size());
-    // TODO refactor the data structure
-    for (int i = 0; i < partitions.size(); i++) {
-        temp[i] = std::move(hotspot_partition_data(partitions[i]));
+    std::vector<hotspot_partition_data> temp;
+    for (const auto &partition_stat : partition_stats) {
+        temp.emplace_back(hotspot_partition_data(partition_stat));
     }
-    _partition_stat_histories.emplace(temp);
+    _partition_stat_histories.emplace_back(temp);
 }
 
 void hotspot_partition_calculator::init_perf_counter(int partition_count)
 {
-    std::string counter_name;
-    std::string counter_desc;
+    std::string read_counter_name, write_counter_name;
+    std::string read_counter_desc, write_counter_desc;
     for (int i = 0; i < partition_count; i++) {
-        string partition_desc = _app_name + '.' + std::to_string(i);
-        counter_name = fmt::format("app.stat.hotspots@{}", partition_desc);
-        counter_desc = fmt::format("statistic the hotspots of app {}", 
partition_desc);
-        _hot_points[i].init_app_counter(
-            "app.pegasus", counter_name.c_str(), COUNTER_TYPE_NUMBER, 
counter_desc.c_str());
+        string read_partition_desc = _app_name + '.' + "read." + 
std::to_string(i);
+        read_counter_name = fmt::format("app.stat.hotspots@{}", 
read_partition_desc);
+        read_counter_desc = fmt::format("statistic the hotspots of app {}", 
read_partition_desc);
+        
_hot_points[i].emplace_back(std::make_unique<dsn::perf_counter_wrapper>());
+        _hot_points[i][READ_HOTSPOT_DATA]->init_app_counter("app.pegasus",
+                                                            
read_counter_name.c_str(),
+                                                            
COUNTER_TYPE_NUMBER,
+                                                            
read_counter_desc.c_str());
+        string write_partition_desc = _app_name + '.' + "write." + 
std::to_string(i);
+        write_counter_name = fmt::format("app.stat.hotspots@{}", 
write_partition_desc);
+        write_counter_desc = fmt::format("statistic the hotspots of app {}", 
write_partition_desc);
+        
_hot_points[i].emplace_back(std::make_unique<dsn::perf_counter_wrapper>());
+        _hot_points[i][WRITE_HOTSPOT_DATA]->init_app_counter("app.pegasus",
+                                                             
write_counter_name.c_str(),
+                                                             
COUNTER_TYPE_NUMBER,
+                                                             
write_counter_desc.c_str());
     }
 }
 
 void hotspot_partition_calculator::data_analyse()
 {
-    dassert(_partition_stat_histories.back().size() == _hot_points.size(),
-            "partition counts error, please check");
-    std::vector<double> data_samples;
-    data_samples.reserve(_partition_stat_histories.size() * 
_hot_points.size());
-    auto temp_data = _partition_stat_histories;
-    double table_qps_sum = 0, standard_deviation = 0, table_qps_avg = 0;
-    int sample_count = 0;
-    while (!temp_data.empty()) {
-        for (const auto &partition_data : temp_data.front()) {
-            if (partition_data.total_qps - 1.00 > 0) {
-                data_samples.push_back(partition_data.total_qps);
-                table_qps_sum += partition_data.total_qps;
-                sample_count++;
+    dcheck_eq(_partition_stat_histories.back().size(), _hot_points.size());
+    for (int data_type = 0; data_type <= 1; data_type++) {
+        // 0: READ_HOTSPOT_DATA; 1: WRITE_HOTSPOT_DATA
+        double table_qps_sum = 0, standard_deviation = 0, table_qps_avg = 0;
+        int sample_count = 0;
+        for (const auto &partition_datas : _partition_stat_histories) {
+            for (const auto &partition_data : partition_datas) {
+                if (partition_data.total_qps[data_type] > 1.00) {
+                    table_qps_sum += partition_data.total_qps[data_type];
+                    sample_count++;
+                }
             }
         }
-        temp_data.pop();
-    }
-    if (sample_count == 0) {
-        ddebug("_partition_stat_histories size == 0");
-        return;
-    }
-    table_qps_avg = table_qps_sum / sample_count;
-    for (const auto &data_sample : data_samples) {
-        standard_deviation += pow((data_sample - table_qps_avg), 2);
-    }
-    standard_deviation = sqrt(standard_deviation / sample_count);
-    const auto &anly_data = _partition_stat_histories.back();
-    for (int i = 0; i < _hot_points.size(); i++) {
-        double hot_point = (anly_data[i].total_qps - table_qps_avg) / 
standard_deviation;
-        // perf_counter->set can only be unsigned __int64
-        // use ceil to guarantee conversion results
-        hot_point = ceil(std::max(hot_point, double(0)));
-        _hot_points[i]->set(hot_point);
+
+        if (sample_count <= 1) {
+            ddebug("_partition_stat_histories size <= 1");
+            return;
+        }
+        table_qps_avg = table_qps_sum / sample_count;
+        for (const auto &partition_datas : _partition_stat_histories) {
+            for (const auto &partition_data : partition_datas) {
+                if (partition_data.total_qps[data_type] > 1.00) {
+                    standard_deviation +=
+                        pow((partition_data.total_qps[data_type] - 
table_qps_avg), 2);
+                }
+            }
+        }
+        standard_deviation = sqrt(standard_deviation / (sample_count - 1));
+        const auto &anly_data = _partition_stat_histories.back();
+        for (int i = 0; i < _hot_points.size(); i++) {
+            double hot_point = 0;
+            if (standard_deviation != 0) {
+                hot_point =
+                    (anly_data[i].total_qps[data_type] - table_qps_avg) / 
standard_deviation;
+            }
+            // perf_counter->set can only be unsigned __int64

Review comment:
       In the comment `// perf_counter->set can only be unsigned __int64`, please replace `unsigned __int64` with the standard fixed-width type name `uint64_t` — `__int64` is an MSVC-specific extension.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to