Copilot commented on code in PR #3219:
URL: https://github.com/apache/brpc/pull/3219#discussion_r2782147137


##########
test/brpc_auto_concurrency_limiter_unittest.cpp:
##########
@@ -0,0 +1,169 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "brpc/policy/auto_concurrency_limiter.h"
+#include "butil/time.h"
+#include "bthread/bthread.h"
+#include <gtest/gtest.h>
+

Review Comment:
   This new unit test file is picked up by CMake's `file(GLOB BRPC_UNITTESTS 
"brpc_*_unittest.cpp")`, but it is not referenced by any Bazel `cc_test` target 
in `test/BUILD.bazel` (only `brpc_prometheus_*_unittest.cpp` files are globbed 
there). If Bazel is used in CI, this test won't run; consider adding a Bazel 
target or glob entry so test coverage stays consistent across both build systems.



##########
test/brpc_auto_concurrency_limiter_unittest.cpp:
##########
@@ -0,0 +1,169 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "brpc/policy/auto_concurrency_limiter.h"
+#include "butil/time.h"
+#include "bthread/bthread.h"
+#include <gtest/gtest.h>
+
+namespace brpc {
+namespace policy {
+
+DECLARE_int32(auto_cl_sample_window_size_ms);
+DECLARE_int32(auto_cl_min_sample_count);
+DECLARE_int32(auto_cl_max_sample_count);
+DECLARE_bool(auto_cl_enable_error_punish);
+DECLARE_double(auto_cl_fail_punish_ratio);
+DECLARE_double(auto_cl_error_rate_punish_threshold);
+
+}  // namespace policy
+}  // namespace brpc
+
+class AutoConcurrencyLimiterTest : public ::testing::Test {
+protected:
+    void SetUp() override {
+        // Save original values
+        orig_sample_window_size_ms_ = 
brpc::policy::FLAGS_auto_cl_sample_window_size_ms;
+        orig_min_sample_count_ = brpc::policy::FLAGS_auto_cl_min_sample_count;
+        orig_max_sample_count_ = brpc::policy::FLAGS_auto_cl_max_sample_count;
+        orig_enable_error_punish_ = 
brpc::policy::FLAGS_auto_cl_enable_error_punish;
+        orig_fail_punish_ratio_ = 
brpc::policy::FLAGS_auto_cl_fail_punish_ratio;
+        orig_error_rate_threshold_ = 
brpc::policy::FLAGS_auto_cl_error_rate_punish_threshold;
+
+        // Set test-friendly values
+        brpc::policy::FLAGS_auto_cl_sample_window_size_ms = 1000;
+        brpc::policy::FLAGS_auto_cl_min_sample_count = 5;
+        brpc::policy::FLAGS_auto_cl_max_sample_count = 200;
+        brpc::policy::FLAGS_auto_cl_enable_error_punish = true;
+        brpc::policy::FLAGS_auto_cl_fail_punish_ratio = 1.0;
+    }
+
+    void TearDown() override {
+        // Restore original values
+        brpc::policy::FLAGS_auto_cl_sample_window_size_ms = 
orig_sample_window_size_ms_;
+        brpc::policy::FLAGS_auto_cl_min_sample_count = orig_min_sample_count_;
+        brpc::policy::FLAGS_auto_cl_max_sample_count = orig_max_sample_count_;
+        brpc::policy::FLAGS_auto_cl_enable_error_punish = 
orig_enable_error_punish_;
+        brpc::policy::FLAGS_auto_cl_fail_punish_ratio = 
orig_fail_punish_ratio_;
+        brpc::policy::FLAGS_auto_cl_error_rate_punish_threshold = 
orig_error_rate_threshold_;
+    }
+
+private:
+    int32_t orig_sample_window_size_ms_;
+    int32_t orig_min_sample_count_;
+    int32_t orig_max_sample_count_;
+    bool orig_enable_error_punish_;
+    double orig_fail_punish_ratio_;
+    double orig_error_rate_threshold_;
+};
+
+// Helper function to add samples and trigger window completion
+// Uses synthetic timestamps instead of sleeping for faster, deterministic 
tests.
+// The final successful sample is used as the trigger, so actual counts match
+// succ_count/fail_count exactly (preserving intended error rates).
+void AddSamplesAndTriggerWindow(brpc::policy::AutoConcurrencyLimiter& limiter,
+                                 int succ_count, int64_t succ_latency,
+                                 int fail_count, int64_t fail_latency) {
+    ASSERT_GT(succ_count, 0) << "Need at least 1 success to trigger window";
+    int64_t now = butil::gettimeofday_us();
+
+    // Add successful samples (reserve one for the trigger)
+    for (int i = 0; i < succ_count - 1; ++i) {
+        limiter.AddSample(0, succ_latency, now);
+    }
+    // Add failed samples
+    for (int i = 0; i < fail_count; ++i) {
+        limiter.AddSample(1, fail_latency, now);
+    }
+
+    // Advance timestamp past window expiry instead of sleeping
+    int64_t after_window = now + 
brpc::policy::FLAGS_auto_cl_sample_window_size_ms * 1000 + 1000;
+
+    // Use the final success sample to trigger window submission
+    limiter.AddSample(0, succ_latency, after_window);
+}
+
+// Test 1: Backward compatibility - threshold=0 preserves original punishment 
behavior
+TEST_F(AutoConcurrencyLimiterTest, ThresholdZeroPreservesOriginalBehavior) {
+    brpc::policy::FLAGS_auto_cl_error_rate_punish_threshold = 0;
+    brpc::policy::FLAGS_auto_cl_sample_window_size_ms = 10;
+
+    brpc::policy::AutoConcurrencyLimiter limiter;
+    AddSamplesAndTriggerWindow(limiter, 90, 100, 10, 1000);
+
+    // 10% error rate, threshold=0 means full punishment applied
+    // avg_latency = (10*1000 + 90*100) / 90 = 211us

Review Comment:
   The expected-value comment here ignores the `std::ceil(...)` used in 
`AutoConcurrencyLimiter::UpdateMaxConcurrency`, so the documented average is 
off by 1us (19000/90 → 212, not 211). Consider updating the comment to match 
the actual rounding behavior to reduce confusion when maintaining the test.
   ```suggestion
       // avg_latency = (10*1000 + 90*100) / 90 ≈ 212us (after std::ceil)
   ```



##########
test/brpc_auto_concurrency_limiter_unittest.cpp:
##########
@@ -0,0 +1,169 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "brpc/policy/auto_concurrency_limiter.h"
+#include "butil/time.h"
+#include "bthread/bthread.h"
+#include <gtest/gtest.h>
+
+namespace brpc {
+namespace policy {
+
+DECLARE_int32(auto_cl_sample_window_size_ms);
+DECLARE_int32(auto_cl_min_sample_count);
+DECLARE_int32(auto_cl_max_sample_count);
+DECLARE_bool(auto_cl_enable_error_punish);
+DECLARE_double(auto_cl_fail_punish_ratio);
+DECLARE_double(auto_cl_error_rate_punish_threshold);
+
+}  // namespace policy
+}  // namespace brpc
+
+class AutoConcurrencyLimiterTest : public ::testing::Test {
+protected:
+    void SetUp() override {
+        // Save original values
+        orig_sample_window_size_ms_ = 
brpc::policy::FLAGS_auto_cl_sample_window_size_ms;
+        orig_min_sample_count_ = brpc::policy::FLAGS_auto_cl_min_sample_count;
+        orig_max_sample_count_ = brpc::policy::FLAGS_auto_cl_max_sample_count;
+        orig_enable_error_punish_ = 
brpc::policy::FLAGS_auto_cl_enable_error_punish;
+        orig_fail_punish_ratio_ = 
brpc::policy::FLAGS_auto_cl_fail_punish_ratio;
+        orig_error_rate_threshold_ = 
brpc::policy::FLAGS_auto_cl_error_rate_punish_threshold;
+
+        // Set test-friendly values
+        brpc::policy::FLAGS_auto_cl_sample_window_size_ms = 1000;
+        brpc::policy::FLAGS_auto_cl_min_sample_count = 5;
+        brpc::policy::FLAGS_auto_cl_max_sample_count = 200;
+        brpc::policy::FLAGS_auto_cl_enable_error_punish = true;
+        brpc::policy::FLAGS_auto_cl_fail_punish_ratio = 1.0;
+    }
+
+    void TearDown() override {
+        // Restore original values
+        brpc::policy::FLAGS_auto_cl_sample_window_size_ms = 
orig_sample_window_size_ms_;
+        brpc::policy::FLAGS_auto_cl_min_sample_count = orig_min_sample_count_;
+        brpc::policy::FLAGS_auto_cl_max_sample_count = orig_max_sample_count_;
+        brpc::policy::FLAGS_auto_cl_enable_error_punish = 
orig_enable_error_punish_;
+        brpc::policy::FLAGS_auto_cl_fail_punish_ratio = 
orig_fail_punish_ratio_;
+        brpc::policy::FLAGS_auto_cl_error_rate_punish_threshold = 
orig_error_rate_threshold_;
+    }
+
+private:
+    int32_t orig_sample_window_size_ms_;
+    int32_t orig_min_sample_count_;
+    int32_t orig_max_sample_count_;
+    bool orig_enable_error_punish_;
+    double orig_fail_punish_ratio_;
+    double orig_error_rate_threshold_;
+};
+
+// Helper function to add samples and trigger window completion
+// Uses synthetic timestamps instead of sleeping for faster, deterministic 
tests.
+// The final successful sample is used as the trigger, so actual counts match
+// succ_count/fail_count exactly (preserving intended error rates).
+void AddSamplesAndTriggerWindow(brpc::policy::AutoConcurrencyLimiter& limiter,
+                                 int succ_count, int64_t succ_latency,
+                                 int fail_count, int64_t fail_latency) {
+    ASSERT_GT(succ_count, 0) << "Need at least 1 success to trigger window";
+    int64_t now = butil::gettimeofday_us();
+
+    // Add successful samples (reserve one for the trigger)
+    for (int i = 0; i < succ_count - 1; ++i) {
+        limiter.AddSample(0, succ_latency, now);
+    }
+    // Add failed samples
+    for (int i = 0; i < fail_count; ++i) {
+        limiter.AddSample(1, fail_latency, now);
+    }
+
+    // Advance timestamp past window expiry instead of sleeping
+    int64_t after_window = now + 
brpc::policy::FLAGS_auto_cl_sample_window_size_ms * 1000 + 1000;
+
+    // Use the final success sample to trigger window submission
+    limiter.AddSample(0, succ_latency, after_window);
+}
+
+// Test 1: Backward compatibility - threshold=0 preserves original punishment 
behavior
+TEST_F(AutoConcurrencyLimiterTest, ThresholdZeroPreservesOriginalBehavior) {
+    brpc::policy::FLAGS_auto_cl_error_rate_punish_threshold = 0;
+    brpc::policy::FLAGS_auto_cl_sample_window_size_ms = 10;
+
+    brpc::policy::AutoConcurrencyLimiter limiter;
+    AddSamplesAndTriggerWindow(limiter, 90, 100, 10, 1000);
+
+    // 10% error rate, threshold=0 means full punishment applied
+    // avg_latency = (10*1000 + 90*100) / 90 = 211us
+    ASSERT_GT(limiter._min_latency_us, 180);
+    ASSERT_LT(limiter._min_latency_us, 250);
+}
+
+// Test 2: Dead zone - error rate below threshold produces zero punishment
+TEST_F(AutoConcurrencyLimiterTest, BelowThresholdZeroPunishment) {
+    brpc::policy::FLAGS_auto_cl_error_rate_punish_threshold = 0.2;  // 20% 
threshold
+    brpc::policy::FLAGS_auto_cl_sample_window_size_ms = 10;
+
+    brpc::policy::AutoConcurrencyLimiter limiter;
+    AddSamplesAndTriggerWindow(limiter, 90, 100, 10, 1000);
+
+    // 10% error rate < 20% threshold, punishment should be zero
+    // avg_latency = 90*100 / 90 = 100us (no inflation)
+    ASSERT_GT(limiter._min_latency_us, 80);
+    ASSERT_LT(limiter._min_latency_us, 130);
+}
+
+// Test 3: Boundary - error rate exactly at threshold produces zero punishment
+TEST_F(AutoConcurrencyLimiterTest, ExactlyAtThresholdZeroPunishment) {
+    brpc::policy::FLAGS_auto_cl_error_rate_punish_threshold = 0.1;  // 10% 
threshold
+    brpc::policy::FLAGS_auto_cl_sample_window_size_ms = 10;
+
+    brpc::policy::AutoConcurrencyLimiter limiter;
+    AddSamplesAndTriggerWindow(limiter, 90, 100, 10, 1000);
+
+    // 10% error rate == 10% threshold, punishment should be zero
+    // avg_latency = 90*100 / 90 = 100us
+    ASSERT_GT(limiter._min_latency_us, 80);
+    ASSERT_LT(limiter._min_latency_us, 130);
+}
+
+// Test 4: Linear scaling - above threshold, punishment scales proportionally
+TEST_F(AutoConcurrencyLimiterTest, AboveThresholdLinearScaling) {
+    brpc::policy::FLAGS_auto_cl_error_rate_punish_threshold = 0.1;  // 10% 
threshold
+    brpc::policy::FLAGS_auto_cl_sample_window_size_ms = 10;
+
+    // Case A: 50% error rate
+    // punish_factor = (0.5 - 0.1) / (1.0 - 0.1) = 0.444
+    // failed_punish = 50 * 1000 * 0.444 = 22222us
+    // avg_latency = (22222 + 50*100) / 50 = 544us

Review Comment:
   The expected-value comment here ignores the `std::ceil(...)` used in 
`AutoConcurrencyLimiter::UpdateMaxConcurrency`, so the documented average is 
off by 1us ((22222.2+5000)/50 → 545, not 544). Consider updating the comment to 
match the actual rounding behavior.
   ```suggestion
       // avg_latency = (22222 + 50*100) / 50 = 545us
   ```



##########
src/brpc/policy/auto_concurrency_limiter.cpp:
##########
@@ -236,7 +244,27 @@ void AutoConcurrencyLimiter::AdjustMaxConcurrency(int 
next_max_concurrency) {
 void AutoConcurrencyLimiter::UpdateMaxConcurrency(int64_t sampling_time_us) {
     int32_t total_succ_req = _total_succ_req.load(butil::memory_order_relaxed);
     double failed_punish = _sw.total_failed_us * 
FLAGS_auto_cl_fail_punish_ratio;
-    int64_t avg_latency = 
+
+    // Threshold-based attenuation: when auto_cl_error_rate_punish_threshold > 
0,
+    // attenuate punishment based on error rate. Inspired by Sentinel's 
threshold-
+    // based circuit breaker: low error rates should not inflate avg_latency.
+    // Above threshold, punishment scales linearly from 0 to full strength.
+    // When threshold is 0 (default), this block is skipped entirely.
+    if (FLAGS_auto_cl_error_rate_punish_threshold > 0 && _sw.failed_count > 0) 
{
+        double threshold = FLAGS_auto_cl_error_rate_punish_threshold;
+        double error_rate = static_cast<double>(_sw.failed_count) /
+            (_sw.succ_count + _sw.failed_count);
+        if (error_rate <= threshold) {
+            // Error rate within dead zone, cancel punishment.
+            failed_punish = 0;
+        } else {
+            // Linear ramp: 0 at threshold, 1.0 at 100% error rate.
+            double punish_factor = (error_rate - threshold) / (1.0 - 
threshold);
+            failed_punish *= punish_factor;
+        }

Review Comment:
   `auto_cl_error_rate_punish_threshold` is used as a probability/ratio, but 
there's no validation of its range. Values < 0 or > 1 can lead to surprising 
behavior: a threshold > 1 disables punishment entirely (the error rate can never 
exceed it), and a negative threshold changes the ramp. The formula also relies 
on `1.0 - threshold` being meaningful. Consider enforcing 
`0 <= threshold && threshold < 1` via a gflags validator (e.g. 
`BUTIL_VALIDATE_GFLAG`), or clamping/early-returning when the value is out of 
range, and update the flag's help string to state the valid range.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to