elvis-cai opened a new issue #4906:
URL: https://github.com/apache/apisix/issues/4906


   ### Issue description
   
   Hi, we are testing apisix to load balancing to two ingress domains which is 
`apple.internal.com` and `banana.internal.com`, in order to make request from 
apisix to upstream working, we add `pass_host: node` option, so apisix will add 
host from upstream node, it works perfectly.
   We continue with the second round test with active health check enabled, 
when scaling down `apple` or `banana`, we found apisix is still routing to the 
bad host, here are the testing results.
   
   ```
    for i in $(seq 1 1000); do curl  -H "Host: httpbin.org" 
${APISIX_GATEWAY_URL}  ; done 
   apple
   apple
   <html>
   <head><title>503 Service Temporarily Unavailable</title></head>
   <body>
   <center><h1>503 Service Temporarily Unavailable</h1></center>
   <hr><center>nginx/1.17.7</center>
   </body>
   </html>
   apple
   apple
   apple
   <html>
   <head><title>503 Service Temporarily Unavailable</title></head>
   <body>
   <center><h1>503 Service Temporarily Unavailable</h1></center>
   <hr><center>nginx/1.17.7</center>
   </body>
   </html>
   apple
   apple
   apple
   apple
   apple
   ```
   
   below is my route/upstream configuration:
   ```
   curl "${APISIX_ADMIN_URL}/apisix/admin/routes/1"  -H "X-API-KEY: 
edd1c9f034335f136f87ad84b625c8f1" -X PUT -d '
   {
           "uri": "/*",
           "host": "httpbin.org",
           "upstream": {
             "pass_host": "node",
             "hash_on": "vars",
             "nodes": {
                   "fx-apple.awx.im:80": 1,
                   "fx-banana.awx.im:80": 1
             },
             "type": "roundrobin",
             "retries": 2,
             "checks": {
                 "active": {
                     "timeout": 1,
                     "http_path": "/",
                     "healthy": {
                         "http_statuses": [200, 201, 302],
                         "interval": 1,
                         "successes": 1
                     },
                     "unhealthy": {
                         "http_statuses": [429,404,500,501,502,503,504,505],
                         "interval": 1,
                         "timeout": 1,
                         "http_failures": 1
                     }
                 },
                 "passive": {
                    "healthy": {
                       "http_statuses": [200, 201, 302],
                       "successes": 1
                    },
                    "unhealthy": {
                       "http_statuses": [429,404,500,501,502,503,504,505],
                       "http_failures": 1,
                       "tcp_failures": 1
                   }
                 }
             }
       }
   }'
   ```
   
   I also tested with `active.host`: if I add `active.host: 
apple.internal.com`, requests will always go to apple. When scaling down the 
apple node, requests will go to both the failed apple node and the banana node, 
and after bringing apple back up, it will always hit apple again. Not sure if I 
am missing anything here; any feedback appreciated. 🙏
   
   ------------------------------------------------------------
   At the same time, I also did another test with kong. I found kong works 
fine: it can detect the upstream failure and route to the working one. Here's 
the config with kong (applied with the `deck` cli).
   ```
   _format_version: "1.1"
   services:
   - connect_timeout: 60000
     host: upstream
     name: example_service
     port: 80
     protocol: http
     read_timeout: 60000
     retries: 5
     routes:
     - https_redirect_status_code: 426
       name: mocking
       path_handling: v0
       paths:
       - /mock
       preserve_host: false
       protocols:
       - http
       - https
       regex_priority: 0
       request_buffering: true
       response_buffering: true
       strip_path: true
     write_timeout: 60000
   upstreams:
   - algorithm: round-robin
     hash_fallback: none
     hash_on: none
     hash_on_cookie_path: /
     healthchecks:
       active:
         concurrency: 10
         healthy:
           http_statuses:
           - 200
           - 201
           - 302
           interval: 1
           successes: 1
         http_path: /
         https_verify_certificate: false
         timeout: 0
         type: http
         unhealthy:
           http_failures: 1
           http_statuses:
           - 429
           - 404
           - 500
           - 501
           - 502
           - 503
           - 504
           - 505
           interval: 1
           tcp_failures: 0
           timeouts: 1
       passive:
         healthy:
           http_statuses:
           - 200
           - 201
           - 202
           - 203
           - 204
           - 205
           - 206
           - 207
           - 208
           - 226
           - 300
           - 301
           - 302
           - 303
           - 304
           - 305
           - 306
           - 307
           - 308
           successes: 1
         type: http
         unhealthy:
           http_failures: 1
           http_statuses:
           - 429
           - 500
           - 503
           tcp_failures: 0
           timeouts: 0
       threshold: 0
     name: upstream
     slots: 10000
     targets:
   - target: apple.internal.com:80
     weight: 100
   - target: banana.internal.com:80
     weight: 100
   ```
   
   
   
   ### Environment
   
   - apisix version (cmd: `apisix version`): 2.7
   - OS (cmd: `uname -a`):  Linux apisix-77dc794ff8-bfpcj 4.19.112+ #1 SMP Fri 
Sep 4 12:00:04 PDT 2020 x86_64 Linux
   - OpenResty / Nginx version (cmd: `nginx -V` or `openresty -V`):    
   ```nginx version: openresty/1.19.3.1
   built by gcc 10.2.1 20201203 (Alpine 10.2.1_pre1) 
   built with OpenSSL 1.1.1k  25 Mar 2021
   TLS SNI support enabled
   configure arguments: --prefix=/usr/local/openresty/nginx --with-cc-opt='-O2 
-DNGX_LUA_ABORT_AT_PANIC -I/usr/local/openresty/pcre/include 
-I/usr/local/openresty/openssl/include' --add-module=../ngx_devel_kit-0.3.1 
--add-module=../echo-nginx-module-0.62 --add-module=../xss-nginx-module-0.06 
--add-module=../ngx_coolkit-0.2 --add-module=../set-misc-nginx-module-0.32 
--add-module=../form-input-nginx-module-0.12 
--add-module=../encrypted-session-nginx-module-0.08 
--add-module=../srcache-nginx-module-0.32 --add-module=../ngx_lua-0.10.19 
--add-module=../ngx_lua_upstream-0.07 
--add-module=../headers-more-nginx-module-0.33 
--add-module=../array-var-nginx-module-0.05 
--add-module=../memc-nginx-module-0.19 --add-module=../redis2-nginx-module-0.15 
--add-module=../redis-nginx-module-0.3.7 
--add-module=../rds-json-nginx-module-0.15 
--add-module=../rds-csv-nginx-module-0.09 --add-module=../ngx_stream_lua-0.0.9 
--with-ld-opt='-Wl,-rpath,/usr/local/openresty/luajit/lib 
-L/usr/local/openresty/pcre/l
 ib -L/usr/local/openresty/openssl/lib 
-Wl,-rpath,/usr/local/openresty/pcre/lib:/usr/local/openresty/openssl/lib' 
--with-pcre --with-compat --with-file-aio --with-http_addition_module 
--with-http_auth_request_module --with-http_dav_module --with-http_flv_module 
--with-http_geoip_module=dynamic --with-http_gunzip_module 
--with-http_gzip_static_module --with-http_image_filter_module=dynamic 
--with-http_mp4_module --with-http_random_index_module 
--with-http_realip_module --with-http_secure_link_module 
--with-http_slice_module --with-http_ssl_module --with-http_stub_status_module 
--with-http_sub_module --with-http_v2_module --with-http_xslt_module=dynamic 
--with-ipv6 --with-mail --with-mail_ssl_module --with-md5-asm --with-pcre-jit 
--with-sha1-asm --with-stream --with-stream_ssl_module --with-threads 
--with-stream --with-stream_ssl_preread_module
   ```
   
   - etcd version, if have (cmd: run `curl 
http://127.0.0.1:9090/v1/server_info` to get the info from server-info API):
   etcdctl version: 3.4.16


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to