This is an automated email from the ASF dual-hosted git repository.

nic443 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/apisix.git


The following commit(s) were added to refs/heads/master by this push:
     new 7fccd330e fix: control api return wrong status data for passive health 
check (#12975)
7fccd330e is described below

commit 7fccd330e39dc994368afade44dae15db0035682
Author: Nic <[email protected]>
AuthorDate: Thu Feb 5 18:36:53 2026 +0800

    fix: control api return wrong status data for passive health check (#12975)
    
    Signed-off-by: Nic <[email protected]>
---
 apisix-master-0.rockspec                 |  2 +-
 t/control/healthcheck.t                  | 92 ++++++++++++++++++++++++++++++++
 t/stream-node/healthcheck-resty-events.t |  2 +-
 3 files changed, 94 insertions(+), 2 deletions(-)

diff --git a/apisix-master-0.rockspec b/apisix-master-0.rockspec
index ecc2b56b8..1b6a5b9fd 100644
--- a/apisix-master-0.rockspec
+++ b/apisix-master-0.rockspec
@@ -41,7 +41,7 @@ dependencies = {
     "lua-resty-ngxvar = 0.5.2-0",
     "lua-resty-jit-uuid = 0.0.7-2",
     "lua-resty-ksuid = 1.0.1-0",
-    "lua-resty-healthcheck-api7 = 3.2.0-0",
+    "lua-resty-healthcheck-api7 = 3.2.1-0",
     "api7-lua-resty-jwt = 0.2.6-0",
     "lua-resty-hmac-ffi = 0.06-1",
     "lua-resty-cookie = 0.4.1-1",
diff --git a/t/control/healthcheck.t b/t/control/healthcheck.t
index 749c3e62a..79f8eb120 100644
--- a/t/control/healthcheck.t
+++ b/t/control/healthcheck.t
@@ -313,3 +313,95 @@ GET /v1/healthcheck/route/1
 --- error_code: 400
 --- response_body
 {"error_msg":"invalid src type route"}
+
+
+
+=== TEST 7: passive health check status
+--- yaml_config
+apisix:
+    node_listen: 1984
+deployment:
+    role: data_plane
+    role_data_plane:
+        config_provider: yaml
+--- apisix_yaml
+routes:
+  -
+    id: 1
+    uris:
+        - /specific_status
+    upstream:
+      nodes:
+        "127.0.0.1:1980": 1
+        "127.0.0.2:1980": 1
+      type: roundrobin
+      checks:
+        active:
+            healthy:
+                interval: 999 # large interval to avoid active check influence
+            unhealthy:
+                interval: 999
+        passive:
+          healthy:
+            http_statuses:
+              - 200
+            successes: 1
+          unhealthy:
+            http_statuses:
+              - 500
+            http_failures: 3
+#END
+--- config
+    location /t {
+        content_by_lua_block {
+            local json = require("toolkit.json")
+            local t = require("lib.test_admin")
+            local http = require "resty.http"
+
+            -- first request to trigger health checker manager startup
+            local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/specific_status"
+            local httpc = http.new()
+            local res, err = httpc:request_uri(uri, {
+                method = "GET",
+                headers = {
+                    ["x-test-upstream-status"] = "500"
+                }
+            })
+            if not res then
+                ngx.say("failed to request: ", err)
+                return
+            end
+
+            ngx.sleep(1)
+
+            for i = 1, 6 do
+                local res, err = httpc:request_uri(uri, {
+                    method = "GET",
+                    headers = {
+                        ["x-test-upstream-status"] = "500"
+                    }
+                })
+                if not res then
+                    ngx.say("failed to request: ", err)
+                    return
+                end
+            end
+
+            local code, body, res = t.test('/v1/healthcheck/routes/1',
+                ngx.HTTP_GET)
+            ngx.log(ngx.ERR, "healthcheck response: ", res)
+            res = json.decode(res)
+            table.sort(res.nodes, function(a, b)
+                return a.ip < b.ip
+            end)
+            ngx.say(json.encode(res))
+        }
+    }
+--- grep_error_log eval
+qr/unhealthy HTTP increment \(.+\) for '127.0.0.1\(127.0.0.1:1980\)'/
+--- grep_error_log_out
+unhealthy HTTP increment (1/3) for '127.0.0.1(127.0.0.1:1980)'
+unhealthy HTTP increment (2/3) for '127.0.0.1(127.0.0.1:1980)'
+unhealthy HTTP increment (3/3) for '127.0.0.1(127.0.0.1:1980)'
+--- response_body
+{"name":"/routes/1","nodes":[{"counter":{"http_failure":3,"success":0,"tcp_failure":0,"timeout_failure":0},"hostname":"127.0.0.1","ip":"127.0.0.1","port":1980,"status":"unhealthy"},{"counter":{"http_failure":3,"success":0,"tcp_failure":0,"timeout_failure":0},"hostname":"127.0.0.2","ip":"127.0.0.2","port":1980,"status":"unhealthy"}],"type":"http"}
diff --git a/t/stream-node/healthcheck-resty-events.t 
b/t/stream-node/healthcheck-resty-events.t
index e97abc3bd..5e33c61c1 100644
--- a/t/stream-node/healthcheck-resty-events.t
+++ b/t/stream-node/healthcheck-resty-events.t
@@ -283,7 +283,7 @@ passed
 proxy request to 127.0.0.1:9995 while connecting to upstream
 connect() failed (111: Connection refused) while connecting to upstream, 
client: 127.0.0.1, server: 0.0.0.0:1985, upstream: "127.0.0.1:9995"
 enabled healthcheck passive while connecting to upstream, client: 127.0.0.1, 
server: 0.0.0.0:1985, upstream: "127.0.0.1:9995",
-unhealthy TCP increment (1/1) for '(127.0.0.1:9995)' while connecting to 
upstream, client: 127.0.0.1, server: 0.0.0.0:1985, upstream: "127.0.0.1:9995",
+unhealthy TCP increment (1/1) for '127.0.0.1(127.0.0.1:9995)' while connecting 
to upstream, client: 127.0.0.1, server: 0.0.0.0:1985, upstream: 
"127.0.0.1:9995",
 proxy request to 127.0.0.1:1995 while connecting to upstream
 proxy request to 127.0.0.1:1995 while connecting to upstream
 proxy request to 127.0.0.1:1995 while connecting to upstream

Reply via email to