Hi List,
When I enable H2 ('alpn h2,http/1.1') on the haproxy bind line with SSL
offloading ('mode http'), the overall loading of a web application I use takes
longer than without it. (Tried with 1.9-dev5 and previous versions.)
The webapp loads around 25 objects of css/js/images on a page, and when
using H1 it uses 4 keep-alive connections to retrieve all objects.
However, when enabling H2 on the frontend, the connection to the webserver
(which is itself also made with SSL encryption) is created for every single
requested object. I suspect this is the main reason for the slowdown: it
now needs to perform the SSL handshake on the backend 25 times.
Is this by (current) design? Is it planned/possible this will be changed
before 1.9 release?
Or is it likely my configuration / conclusion is wrong?
I've added a little vtc that tries to simulate the behavior. It currently
fails with "---- s4 0.2 HTTP rx failed (fd:10 read: Connection reset by
peer)", which is where the s4 server expects a second request over
its keep-alive connection (assuming I wrote the test correctly..), while
it 'should' fail on the s3 server.
Regards,
PiBa-NL (Pieter)
# h2 with h1 backend connection reuse check
# the c3 > h1 > s3 test works (wrongly?) because haproxy breaks the connection
# to the server, and creates a new one..
# the c4 > h1 > s4 test fails because haproxy breaks the connection to the
# server, while it should keep the connection alive.
# Test case: verify that an H2 frontend reuses the H1 keep-alive connection
# to the backend server instead of opening a new one per request.
varnishtest "h2 with h1 backend connection reuse check"
# Ignore macros this vtest version does not know about (haproxy-specific ones).
feature ignore_unknown_macro
# HTTP/1.1 origin for the baseline test: expects two requests on a single
# keep-alive connection (driven by client c1 via frontend fe1 / backend b1).
server s1 {
rxreq
txresp -gziplen 200
rxreq
txresp -gziplen 200
} -start
# HTTP/2 origin: one settings exchange on stream 0, then one response each on
# streams 1 and 3.
# NOTE(review): no haproxy backend points at s2 — it is only contacted
# directly by client c2, presumably as a sanity check of the H2 scripting;
# confirm that is the intent.
server s2 {
stream 0 {
rxsettings
txsettings -ack
} -run
stream 1 {
rxreq
txresp -bodylen 200
} -run
stream 3 {
rxreq
txresp -bodylen 200
} -run
} -start
# HTTP/1.1 origin behind backend b3; '-repeat 2' accepts two separate
# connections, i.e. this server tolerates haproxy NOT reusing the connection
# (one connection per request) — which is why the c3 test "works (wrongly?)".
server s3 -repeat 2 {
rxreq
txresp -gziplen 200
} -start
# HTTP/1.1 origin behind backend b4: expects BOTH requests on one keep-alive
# connection (no -repeat). This is where the reported failure occurs — the
# second rxreq sees "Connection reset by peer" when haproxy opens a new
# connection instead of reusing this one.
server s4 {
rxreq
txresp -gziplen 200
rxreq
txresp -gziplen 200
} -start
# haproxy under test, in master-worker mode (-W).
haproxy h1 -W -conf {
global
#nbthread 3
log :1514 local0
stats socket /tmp/haproxy.socket level admin
defaults
mode http
#option dontlog-normal
log global
option httplog
timeout connect 3s
timeout client 40s
timeout server 40s
# fe1/b1: plain HTTP/1.1 path with explicit keep-alive — baseline case.
frontend fe1
bind "fd@${fe1}"
default_backend b1
backend b1
option http-keep-alive
server srv1 ${s1_addr}:${s1_port}
# fe3/b3: cleartext HTTP/2 frontend ('proto h2'); b3 has no explicit
# keep-alive option, and s3 accepts two connections anyway.
frontend fe3
bind "fd@${fe3}" proto h2
default_backend b3
backend b3
server srv3 ${s3_addr}:${s3_port}
# fe4/b4: cleartext HTTP/2 frontend with keep-alive explicitly enabled on
# the backend — the case that should reuse the single connection to s4.
frontend fe4
bind "fd@${fe4}" proto h2
default_backend b4
backend b4
option http-keep-alive
server srv4 ${s4_addr}:${s4_port}
} -start
# HTTP/1.1 client: two sequential requests over one client connection,
# expected to be forwarded to s1 over a single keep-alive backend connection.
client c1 -connect ${h1_fe1_sock} {
txreq -url "/1"
rxresp
expect resp.status == 200
txreq -url "/2"
rxresp
expect resp.status == 200
} -start
client c1 -wait
# HTTP/2 client talking DIRECTLY to server s2 (note: ${s2_sock}, not a
# haproxy frontend) — presumably a sanity check that the H2 client/server
# scripting itself works; verify this bypass is intentional.
client c2 -connect ${s2_sock} {
stream 0 {
txsettings -hdrtbl 0
rxsettings
} -run
stream 1 {
txreq -req GET -url /3
rxresp
} -run
stream 3 {
txreq -req GET -url /4
rxresp
} -run
} -start
client c2 -wait
# HTTP/2 client through haproxy frontend fe3: two requests on streams 1 and 3.
# s3 tolerates two backend connections (-repeat 2), so this passes even when
# haproxy does not reuse the backend connection.
client c3 -connect ${h1_fe3_sock} {
stream 0 {
txsettings -hdrtbl 0
rxsettings
} -run
stream 1 {
txreq -req GET -url /3
rxresp
} -run
stream 3 {
txreq -req GET -url /4
rxresp
} -run
} -start
client c3 -wait
# HTTP/2 client through haproxy frontend fe4: two requests on streams 1 and 3.
# s4 expects both on ONE keep-alive backend connection, so this client
# exposes the missing backend connection reuse.
client c4 -connect ${h1_fe4_sock} {
stream 0 {
txsettings -hdrtbl 0
rxsettings
} -run
stream 1 {
txreq -req GET -url /3
rxresp
} -run
stream 3 {
txreq -req GET -url /4
rxresp
} -run
} -start
client c4 -wait
# Wait for every server script to complete, so the test fails if any server
# did not receive the requests it expected.
server s1 -wait
server s2 -wait
server s3 -wait
server s4 -wait