Hi,

OK, I haven't read the ML for ~2 weeks and a quick scan didn't reveal anything.
So, here I am asking something that may have been addressed already.

Today, I decided to switch my dev env to haproxy-1.8 using current master and I
started haproxy in the same way as I have been doing with older releases:

sudo ./haproxy -f /etc/haproxy/haproxy-ams4-dc.cfg
[WARNING] 340/173007 (3104) : parsing [/etc/haproxy/haproxy-ams4-dc.cfg:103] : a
'http-request' rule placed after a 'use_backend' rule will still be processed 
before.

above it didn't return and wasn't printing, except the warning. I curled against
the IPs and got back connection errors, see attached file for process output,
lsof
info, build version and haproxy.cfg.

I also started it in the way mentioned in section 3 of the management document:
sudo ./haproxy  -f /etc/haproxy/haproxy-ams4-dc.cfg -D -p /run/haproxy-ams4.pid
-sf $(cat /run/haproxy-ams4.pid)
cat: /run/haproxy-ams4.pid: No such file or directory
[WARNING] 340/173007 (3104) : parsing [/etc/haproxy/haproxy-ams4-dc.cfg:103] : a
'http-request' rule placed after a 'use_backend' rule will still be processed 
before.

but same result, haproxy didn't return and I had to CTRL-C it.

I am pretty sure I am doing something stupid but I can't find it.

Any ideas?

Cheers,
Pavlos
foo at me in ~
curl -v http://10.52.12.2                                                       
                                                      
* Rebuilt URL to: http://10.52.12.2/
*   Trying 10.52.12.2...
* TCP_NODELAY set
* connect to 10.52.12.2 port 80 failed: Connection refused
* Failed to connect to 10.52.12.2 port 80: Connection refused
* Closing connection 0
foo at me in ~ *7
ps|grep haproxy                                                                 
                                                      
root      8244  0.0  0.0  51352  3800 pts/25   S+   17:14   0:00  |   |   \_ 
sudo ./haproxy -f /etc/haproxy/haproxy-ams4-dc.cfg
root      8245 99.5  0.0  32468  5704 pts/25   R+   17:14   0:50  |   |       
\_ ./haproxy -f /etc/haproxy/haproxy-ams4-dc.cfg
ppariss+ 10834  0.0  0.0  12788   968 pts/26   S+   17:15   0:00  |       \_ 
grep --colour=auto haproxy

foo at me in ~
sudo lsof -n|grep haproxy                                                       
                                                      
lsof: WARNING: can't stat() fuse.gvfsd-fuse file system /run/user/1000/gvfs
      Output information may be incomplete.
rsyslogd    542              root    4u     unix 0xffff9b4879369c00        0t0  
    17715 /var/lib/haproxy/dev/log type=DGRAM
in:imuxso   542   570        root    4u     unix 0xffff9b4879369c00        0t0  
    17715 /var/lib/haproxy/dev/log type=DGRAM
in:imklog   542   571        root    4u     unix 0xffff9b4879369c00        0t0  
    17715 /var/lib/haproxy/dev/log type=DGRAM
in:imudp    542   572        root    4u     unix 0xffff9b4879369c00        0t0  
    17715 /var/lib/haproxy/dev/log type=DGRAM
rs:main     542   573        root    4u     unix 0xffff9b4879369c00        0t0  
    17715 /var/lib/haproxy/dev/log type=DGRAM
sudo       8244              root  cwd       DIR              254,4       4096  
  6307210 /home/foo/repo/haproxy-1.8
haproxy    8245              root  cwd       DIR              254,4       4096  
  6307210 /home/foo/repo/haproxy-1.8
haproxy    8245              root  rtd       DIR              254,1       4096  
        2 /
haproxy    8245              root  txt       REG              254,4    7643016  
  6307218 /home/foo/repo/haproxy-1.8/haproxy
haproxy    8245              root  mem       REG              254,1      47632  
   915005 /lib/x86_64-linux-gnu/libnss_files-2.24.so
haproxy    8245              root  mem       REG              254,1      47688  
   915010 /lib/x86_64-linux-gnu/libnss_nis-2.24.so
haproxy    8245              root  mem       REG              254,1      89064  
   915001 /lib/x86_64-linux-gnu/libnsl-2.24.so
haproxy    8245              root  mem       REG              254,1      31616  
   915003 /lib/x86_64-linux-gnu/libnss_compat-2.24.so
haproxy    8245              root  mem       REG              254,1    1689360  
   914991 /lib/x86_64-linux-gnu/libc-2.24.so
haproxy    8245              root  mem       REG              254,1     468920  
   914073 /lib/x86_64-linux-gnu/libpcre.so.3.13.3
haproxy    8245              root  mem       REG              254,1      10128  
   392554 /usr/lib/x86_64-linux-gnu/libpcreposix.so.3.13.3
haproxy    8245              root  mem       REG              254,1    2686672  
   396263 /usr/lib/x86_64-linux-gnu/libcrypto.so.1.1
haproxy    8245              root  mem       REG              254,1     442920  
   396761 /usr/lib/x86_64-linux-gnu/libssl.so.1.1
haproxy    8245              root  mem       REG              254,1     135440  
   915013 /lib/x86_64-linux-gnu/libpthread-2.24.so
haproxy    8245              root  mem       REG              254,1      14640  
   914996 /lib/x86_64-linux-gnu/libdl-2.24.so
haproxy    8245              root  mem       REG              254,1      39256  
   914995 /lib/x86_64-linux-gnu/libcrypt-2.24.so
haproxy    8245              root  mem       REG              254,1     153288  
   914986 /lib/x86_64-linux-gnu/ld-2.24.so
haproxy    8245              root    0u      CHR             136,25        0t0  
       28 /dev/pts/25
haproxy    8245              root    1u      CHR             136,25        0t0  
       28 /dev/pts/25
haproxy    8245              root    2u      CHR             136,25        0t0  
       28 /dev/pts/25
haproxy    8245              root    3r      REG              254,4      15626  
 11277333 /home/foo/repo/lb30/dc_failover/haproxy-ams4-dc.cfg~ (deleted)
bash      13062         foo  cwd       DIR              254,4       4096    
6307210 /home/foo/repo/haproxy-1.8



foo at me in ~/repo/haproxy-1.8 on (master u=)
./haproxy -vv
HA-Proxy version 1.8.1-fe66fd-7 2017/12/06
Copyright 2000-2017 Willy Tarreau <[email protected]>

Build options :
  TARGET  = linux2628
  CPU     = generic
  CC      = gcc
  CFLAGS  = -O2 -g -fno-strict-aliasing -Wdeclaration-after-statement -fwrapv 
-Wno-null-dereference -Wno-unused-label
  OPTIONS = USE_LINUX_SPLICE=1 USE_LINUX_TPROXY=1 USE_CPU_AFFINITY=1 
USE_REGPARM=1 USE_OPENSSL=1 USE_PCRE=1 USE_PCRE_JIT=1

Default settings :
  maxconn = 2000, bufsize = 16384, maxrewrite = 1024, maxpollevents = 200

Built with OpenSSL version : OpenSSL 1.1.0f  25 May 2017
Running on OpenSSL version : OpenSSL 1.1.0f  25 May 2017
OpenSSL library supports TLS extensions : yes
OpenSSL library supports SNI : yes
OpenSSL library supports : TLSv1.0 TLSv1.1 TLSv1.2
Built with transparent proxy support using: IP_TRANSPARENT IPV6_TRANSPARENT 
IP_FREEBIND
Encrypted password support via crypt(3): yes
Built with multi-threading support.
Built with PCRE version : 8.39 2016-06-14
Running on PCRE version : 8.39 2016-06-14
PCRE library supports JIT : yes
Built without compression support (neither USE_ZLIB nor USE_SLZ are set).
Compression algorithms supported : identity("identity")
Built with network namespace support.

Available polling systems :
      epoll : pref=300,  test result OK
       poll : pref=200,  test result OK
     select : pref=150,  test result OK
Total: 3 (3 usable), will use epoll.

Available filters :
        [SPOE] spoe
        [COMP] compression
        [TRACE] trace

foo at me in ~/repo/haproxy-1.8 on (master u=)






global
    setenv LBGROUP external_101
    setenv DATACENTER ams4
    log 127.0.0.1:514 len 16384 local2
    chroot /var/lib/haproxy
    stats socket /run/haproxy/ams4.sock mode 666 level admin process 1
    stats timeout 30s
    user haproxy
    group haproxy
    daemon
    nbproc 1
    ca-base /etc/ssl/certs
    crt-base /etc/ssl/private
    # Default ciphers to use on SSL-enabled listening sockets.
    # For more information, see ciphers(1SSL).
    ssl-default-bind-ciphers 
kEECDH+aRSA+AES:kRSA+AES:+AES256:RC4-SHA:!kEDH:!LOW:!EXP:!MD5:!aNULL:!eNULL
    ssl-default-bind-options no-sslv3
    maxconn 10000
    pidfile /run/haproxy-ams4

defaults
    errorfile 400 /etc/haproxy/errors/400.http
    errorfile 403 /etc/haproxy/errors/403.http
    errorfile 408 /etc/haproxy/errors/408.http
    errorfile 500 /etc/haproxy/errors/500.http
    errorfile 502 /etc/haproxy/errors/502.http
    errorfile 503 /etc/haproxy/errors/503.http
    errorfile 504 /etc/haproxy/errors/504.http
    option     redispatch
    option     prefer-last-server
    #log-format "${LBGROUP}"\ %ci:%cp\ [%t]\ %ft\ %b/%s\ %Tq/%Tw/%Tc/%Tr/%Tt\ 
%ST\ %B\ %CC\ %CS\ %tsc\ %ac/%fc/%bc/%sc/%rc\ %sq/%bq\ %hs\ 
%[http_first_req]/%[capture.req.hdr(0),lower]\ %fi:%fp\ %sslc\ %sslv\ %{+Q}r
    log-format 
{\"lbgroup\":\""${LBGROUP}"\",\"dst_ip\":\"%fi\",\"dst_port\":\"%fp\",\"client_ip\":\"%ci\",\"client_port\":\"%cp\",\"timestamp\":\"%t\",\"frontend_name\":\"%ft\",\"backend_name\":\"%b\",\"server_name\":\"%s\",\"tq\":\"%Tq\",\"ta\":\"%Ta\",\"td\":\"%Td\",\"th\":\"%Th\",\"ti\":\"%Ti\",\"trf\":\"%TR\",\"tw\":\"%Tw\",\"tc\":\"%Tc\",\"tr\":\"%Tr\",\"tt\":\"%Tt\",\"status_code\":\"%ST\",\"bytes_read\":\"%B\",\"termination_state\":\"%tsc\",\"actconn\":\"%ac\",\"feconn\":\"%fc\",\"beconn\":\"%bc\",\"srv_conn\":\"%sc\",\"retries\":\"%rc\",\"srv_queue\":\"%sq\",\"backend_queue\":\"%bq\",\"toptalkers\":\"%[http_first_req]\",\"vhost\":\"%[capture.req.hdr(0),lower]\",\"ssl_ciphers\":\"%sslc\",\"ssl_version\":\"%sslv\",\"http_method\":\"%HM\",\"http_version\":\"%HV\",\"http_uri\":\"%HP\"}
    backlog 65535
    balance roundrobin
    log global
    maxconn 500000
    mode http
    no option dontlognull
    option contstats
    option http-keep-alive
    option tcp-smart-accept
    option tcp-smart-connect
    retries 2
    timeout check 5s
    timeout client 30s
    timeout connect 4s
    timeout http-request 30s
    timeout queue 1m
    timeout server 30s


frontend www.bar.com_http_ams4
    bind 10.52.12.1:80
    bind 10.52.13.1:80
    bind 10.52.14.1:80

    acl site_dead nbsrv(www.bar.com_http_all) lt 1
    monitor-uri   /site_alive
    monitor fail  if site_dead

    # businesslogic: www.bar.com_https_ash1:add_bar_headers
    http-request add-header X-Header-Order %[req.hdr_names(:)]
    http-request add-header F5SourceIP %[src]
    http-request add-header F5Nodename %H
    http-request add-header F5-Proto https if { ssl_fc }
    http-request add-header F5-Proto http unless { ssl_fc }
    http-request add-header F5CipherName %sslc if { ssl_fc }
    http-request add-header F5CipherVersion %sslv if { ssl_fc }
    http-request add-header F5CipherBits %[ssl_fc_use_keysize] if { ssl_fc }
    http-request add-header F5TrackerID %{+X}Ts%{+X}[rand()]
    http-response set-header X-XSS-Protection "1; mode=block"

    # no need to go to production
    http-response replace-header X-Debug (^.*$) frontend:%f,\1

    # DC availability logic.
    # Get target pool by looking at the destination IP address of the request
    http-request set-header X__Target-Pool__ 
%[str(www.bar.com_http_)]%[dst,map_ip(/etc/haproxy/dst_ip_dc.map,all)]
    http-request set-var(req.target_pool) hdr(X__Target-Pool__)
    # Clean up the header
    http-request del-header X__Target-Pool__

    # Use the target pool if it has enough healthy servers.
    # virtualserver.min_members property should be used here.
    use_backend %[var(req.target_pool)] if { var(req.target_pool),nbsrv ge 1 }

    # We end up here if the selected pool of a data center is down.
    # We don't want to use the all pool as it would flip users between data
    # centers, thus we are going to balance traffic across the two remaining
    # data centers using a hash against the client IP. Unfortunately, we will
    # check again for the availability of the data center, for which we know
    # already is down. I should try to figure out a way to somehow dynamically
    # know the remaining two data centers, so if ams4 is down then I should
    # only check lhr4 and fra4.

    # Check for the availability of the application in each data center.
    # virtualserver.min_members property should be used here.
    acl www.bar.com_http_ams4_down nbsrv(www.bar.com_http_ams4) lt 1
    acl www.bar.com_http_lhr4_down nbsrv(www.bar.com_http_lhr4) lt 1
    acl www.bar.com_http_fra4_down nbsrv(www.bar.com_http_fra4) lt 1

    # Stores either 1 or 0.
    http-request set-var(req.selected_dc_backup) src,djb2,mod(2)

    # Balance traffic when ams4 is down
    use_backend www.bar.com_http_lhr4 if www.bar.com_http_ams4_down 
!www.bar.com_http_lhr4_down { var(req.selected_dc_backup) eq 0 }
    use_backend www.bar.com_http_fra4 if www.bar.com_http_ams4_down 
!www.bar.com_http_fra4_down { var(req.selected_dc_backup) eq 1 }

    # Balance traffic when lhr4 is down
    use_backend www.bar.com_http_ams4 if www.bar.com_http_lhr4_down 
!www.bar.com_http_ams4_down { var(req.selected_dc_backup) eq 0 }
    use_backend www.bar.com_http_fra4 if www.bar.com_http_lhr4_down 
!www.bar.com_http_fra4_down { var(req.selected_dc_backup) eq 1 }

    # Balance traffic when fra4 is down
    use_backend www.bar.com_http_ams4 if www.bar.com_http_lhr4_down 
!www.bar.com_http_ams4_down { var(req.selected_dc_backup) eq 0 }
    use_backend www.bar.com_http_lhr4 if www.bar.com_http_fra4_down 
!www.bar.com_http_lhr4_down { var(req.selected_dc_backup) eq 1 }

    # If two data centers are down then for simplicity reasons just use the all 
pool
    default_backend www.bar.com_http_all

frontend www.bar.com_minipop_http_ams4
    bind 10.52.16.1:80

    acl site_dead nbsrv(www.bar.com_http_all) lt 1
    monitor-uri   /site_alive
    monitor fail  if site_dead

    http-response replace-header X-Debug (^.*$) frontend:%f,\1

    # DC availability logic.
    http-request set-header X__Target-Pool__ 
%[str(www.bar.com_http)]_%[env(DATACENTER)]
    http-request set-var(req.target_pool) hdr(X__Target-Pool__)
    http-request del-header X__Target-Pool__

    # Use the target pool if it has enough healthy servers.
    # virtualserver.min_members property should be used here.
    use_backend %[var(req.target_pool)] if { var(req.target_pool),nbsrv ge 1 }

    # Check for the availability of the app in a data center.
    # NOTE: Two acl's with the same name produces a logical or.
    # virtualserver.min_members property should be used here.
    acl www.bar.com_http_ams4_down nbsrv(www.bar.com_http_ams4) lt 1
    acl www.bar.com_http_lhr4_down nbsrv(www.bar.com_http_lhr4) lt 1
    acl www.bar.com_http_fra4_down nbsrv(www.bar.com_http_fra4) lt 1

    # We end up here if the selected pool of a data center is down.
    # We don't want to use the all pool as it would flip users between data
    # centers, thus we are going to balance traffic across the two remaining
    # data centers using a hash against the client IP. Unfortunately, we will
    # check again for the availability of the data center, for which we know
    # already is down. I should try to figure out a way to somehow dynamically
    # know the remaining two data centers, so if ams4 is down then I should
    # only check lhr4 and fra4.

    # Stores either 1 or 0.
    http-request set-var(req.selected_dc_backup) req.hdr(F5SourceIP),djb2,mod(2)

    # Balance traffic when ams4 is down
    use_backend www.bar.com_http_lhr4 if www.bar.com_http_ams4_down 
!www.bar.com_http_lhr4_down { var(req.selected_dc_backup) eq 0 }
    use_backend www.bar.com_http_fra4 if www.bar.com_http_ams4_down 
!www.bar.com_http_fra4_down { var(req.selected_dc_backup) eq 1 }

    # Balance traffic when lhr4 is down
    use_backend www.bar.com_http_ams4 if www.bar.com_http_lhr4_down 
!www.bar.com_http_ams4_down { var(req.selected_dc_backup) eq 0 }
    use_backend www.bar.com_http_fra4 if www.bar.com_http_lhr4_down 
!www.bar.com_http_fra4_down { var(req.selected_dc_backup) eq 1 }

    # Balance traffic when fra4 is down
    use_backend www.bar.com_http_ams4 if www.bar.com_http_lhr4_down 
!www.bar.com_http_ams4_down { var(req.selected_dc_backup) eq 0 }
    use_backend www.bar.com_http_lhr4 if www.bar.com_http_fra4_down 
!www.bar.com_http_lhr4_down { var(req.selected_dc_backup) eq 1 }

    # If two data centers are down then for simplicity reasons just use the all 
pool
    default_backend www.bar.com_http_all

frontend admin.bar.com_https_ams4
    bind 10.52.12.2:80
    bind 10.52.13.2:80

    acl site_dead nbsrv(admin.bar.com_https_all) lt 1
    monitor-uri   /site_alive
    monitor fail  if site_dead

    # businesslogic: admin.bar.com_https_ash1:add_bar_headers
    #http-request add-header X-Header-Order %[req.hdr_names(:)]
    #http-request add-header F5SourceIP %[src]
    #http-request add-header F5Nodename %H
    #http-request add-header F5-Proto https if { ssl_fc }
    #http-request add-header F5-Proto http unless { ssl_fc }
    #http-request add-header F5CipherName %sslc if { ssl_fc }
    #http-request add-header F5CipherVersion %sslv if { ssl_fc }
    #http-request add-header F5CipherBits %[ssl_fc_use_keysize] if { ssl_fc }
    #http-request add-header F5TrackerID %{+X}Ts%{+X}[rand()]
    #http-response set-header X-XSS-Protection "1; mode=block"

    #http-response replace-header X-Debug (^.*$) frontend:%f,\1

    ## DC availability logic.
    #http-request set-header X__Target-Pool__ 
%[str(admin.bar.com_https_)]%[dst,map_ip(/etc/haproxy/dst_ip_dc.map,all)]
    #http-request set-var(req.target_pool) hdr(X__Target-Pool__)
    #http-request del-header X__Target-Pool__
    #use_backend %[var(req.target_pool)] if { var(req.target_pool),nbsrv ge 1 }

    # DC availability logic.
    # Get target pool by looking at the destination IP address of the request
    http-request set-header X__Target-Pool__ 
%[str(admin.bar.com_https_)]%[dst,map_ip(/etc/haproxy/dst_ip_dc.map,all)]
    http-request set-var(req.target_pool) hdr(X__Target-Pool__)
    # Clean up the header
    http-request del-header X__Target-Pool__

    # Use the target pool if it has enough healthy servers.
    # virtualserver.min_members property should be used here.
    use_backend %[var(req.target_pool)] if { var(req.target_pool),nbsrv ge 1 }

    # We end up here if the selected pool of a data center is down.
    # We don't want to use the all pool as it would flip users between data
    # centers, thus we are going to balance traffic across the two remaining
    # data centers using a hash against the client IP. Unfortunately, we will
    # check again for the availability of the data center, for which we know
    # already is down. I should try to figure out a way to somehow dynamically
    # know the remaining two data centers, so if ams4 is down then I should
    # only check lhr4 and fra4.

    # Check for the availability of the application in each data center.
    # virtualserver.min_members property should be used here.
    acl admin.bar.com_https_ams4_down nbsrv(admin.bar.com_https_ams4) lt 1
    acl admin.bar.com_https_lhr4_down nbsrv(admin.bar.com_https_lhr4) lt 1
    acl admin.bar.com_https_fra4_down nbsrv(admin.bar.com_https_fra4) lt 1

    # Stores either 1 or 0.
    http-request set-var(req.selected_dc_backup) src,djb2,mod(2)

    # Balance traffic when ams4 is down
    use_backend admin.bar.com_https_lhr4 if admin.bar.com_https_ams4_down 
!admin.bar.com_https_lhr4_down { var(req.selected_dc_backup) eq 0 }
    use_backend admin.bar.com_https_fra4 if admin.bar.com_https_ams4_down 
!admin.bar.com_https_fra4_down { var(req.selected_dc_backup) eq 1 }

    # Balance traffic when lhr4 is down
    use_backend admin.bar.com_https_ams4 if admin.bar.com_https_lhr4_down 
!admin.bar.com_https_ams4_down { var(req.selected_dc_backup) eq 0 }
    use_backend admin.bar.com_https_fra4 if admin.bar.com_https_lhr4_down 
!admin.bar.com_https_fra4_down { var(req.selected_dc_backup) eq 1 }

    # Balance traffic when fra4 is down
    use_backend admin.bar.com_https_ams4 if admin.bar.com_https_lhr4_down 
!admin.bar.com_https_ams4_down { var(req.selected_dc_backup) eq 0 }
    use_backend admin.bar.com_https_lhr4 if admin.bar.com_https_fra4_down 
!admin.bar.com_https_lhr4_down { var(req.selected_dc_backup) eq 1 }
    # We end up here if the selected pool of a data center is down.
    # If one data center is down then for simplicity reasons just use the all
pool
    default_backend admin.bar.com_https_all

frontend admin.bar.com_minipop_https_ams4
    bind 10.52.16.2:80

    acl site_dead nbsrv(admin.bar.com_https_all) lt 1
    monitor-uri   /site_alive
    monitor fail  if site_dead

    http-response replace-header X-Debug (^.*$) frontend:%f,\1

    # DC availability logic.
    http-request set-header X__Target-Pool__ 
%[str(admin.bar.com_https)]_%[env(DATACENTER)]
    http-request set-var(req.target_pool) hdr(X__Target-Pool__)
    http-request del-header X__Target-Pool__

    # Use the target pool if it has enough healthy servers.
    # virtualserver.min_members property should be used here.
    use_backend %[var(req.target_pool)] if { var(req.target_pool),nbsrv ge 1 }

    # We end up here if the selected pool of a data center is down.
    # If one data center is down then for simplicity reasons just use the all
pool
    default_backend admin.bar.com_https_all

backend www.bar.com_http_lhr4
    default-server inter 5s
    http-request set-header X-Pool %b
    http-response add-header X-Debug backend:%b,server:%s

    server app-lhr4 10.52.12.9:8888 check

backend www.bar.com_http_ams4
    default-server inter 5s
    http-request set-header X-Pool %b
    http-response add-header X-Debug backend:%b,server:%s

    server app-ams4 10.52.13.9:8888 check

backend www.bar.com_http_fra4
    default-server inter 5s
    http-request set-header X-Pool %b
    http-response add-header X-Debug backend:%b,server:%s

    server app-fra4 10.52.14.9:8888 check

backend www.bar.com_http_all
    default-server inter 5s
    http-request set-header X-Pool %b
    http-response add-header X-Debug backend:%b,server:%s

    server app-lhr4 10.52.12.9:8888 check
    server app-ams4 10.52.13.9:8888 check
    server app-fra4 10.52.14.9:8888 check

backend admin.bar.com_https_lhr4
    default-server inter 60s
    http-request set-header X-Pool %b
    http-response add-header X-Debug backend:%b,server:%s

    server admin-lhr4 10.52.12.8:8888 check

backend admin.bar.com_https_ams4
    default-server inter 60s
    http-request set-header X-Pool %b
    http-response add-header X-Debug backend:%b,server:%s

    server admin-ams4 10.52.13.8:8888 check

backend admin.bar.com_https_all
    default-server inter 160s
    http-request set-header X-Pool %b
    http-response add-header X-Debug backend:%b,server:%s

    server admin-lhr4 10.52.12.8:8888 check
    server admin-ams4 10.52.13.8:8888 check

Attachment: signature.asc
Description: OpenPGP digital signature

Reply via email to