Willy,
Another fix (with some cleanup in other patches). The first one (and
probably the second one) can be backported. But I don't know if this is
mandatory. It is really tricky to find conditions where it could be a
problem.
Thanks
--
Christopher Faulet
>From 0cdf21fc9678ce44eddf10efb4fdaf737e2115b3 Mon Sep 17 00:00:00 2001
From: Christopher Faulet <[email protected]>
Date: Tue, 28 Mar 2017 11:51:33 +0200
Subject: [PATCH 1/4] BUG/MINOR: http: Fix conditions to clean up a txn and to
handle the next request
To finish a HTTP transaction and to start the new one, we check, among other
things, that there is enough space in the response buffer to eventually inject a
message during the parsing of the next request. Because these messages can reach
the maximum buffers size, it is mandatory to have an empty response
buffer. Remaining input data are trimmed during the txn cleanup (in
http_reset_txn), so we just need to check that the output data were flushed.
The current implementation depends on channel_congested, which does check the
reserved area is available. That's of course not good enough. There are other
tests on the response buffer in http_wait_for_request. But conditions to move on
are almost the same. So, we can imagine some scenarios where some output data
remaining in the response buffer during the request parsing prevent any message
injection.
To fix this bug, we just wait until output data are flushed before cleaning up
the HTTP txn (ie. s->res.buf->o == 0). In addition, in http_reset_txn we realign
the response buffer (note the buffer is empty at this step).
Thanks to these changes, there is no more need to set CF_EXPECT_MORE on the
response channel in http_end_txn_clean_session. And more important, there is no
more need to check the response buffer state in http_wait_for_request. This
removes a workaround on response analysers to handle HTTP pipelining.
This patch can be backported in 1.7, 1.6 and 1.5.
---
src/proto_http.c | 48 +++++++-----------------------------------------
src/stream.c | 12 ------------
2 files changed, 7 insertions(+), 53 deletions(-)
diff --git a/src/proto_http.c b/src/proto_http.c
index 2d97fbe..24d034a 100644
--- a/src/proto_http.c
+++ b/src/proto_http.c
@@ -2649,29 +2649,6 @@ int http_wait_for_request(struct stream *s, struct channel *req, int an_bit)
buffer_slow_realign(req->buf);
}
- /* Note that we have the same problem with the response ; we
- * may want to send a redirect, error or anything which requires
- * some spare space. So we'll ensure that we have at least
- * maxrewrite bytes available in the response buffer before
- * processing that one. This will only affect pipelined
- * keep-alive requests.
- */
- if ((txn->flags & TX_NOT_FIRST) &&
- unlikely(!channel_is_rewritable(&s->res) ||
- bi_end(s->res.buf) < b_ptr(s->res.buf, txn->rsp.next) ||
- bi_end(s->res.buf) > s->res.buf->data + s->res.buf->size - global.tune.maxrewrite)) {
- if (s->res.buf->o) {
- if (s->res.flags & (CF_SHUTW|CF_SHUTW_NOW|CF_WRITE_ERROR|CF_WRITE_TIMEOUT))
- goto failed_keep_alive;
- /* don't let a connection request be initiated */
- channel_dont_connect(req);
- s->res.flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
- s->res.flags |= CF_WAKE_WRITE;
- s->res.analysers |= an_bit; /* wake us up once it changes */
- return 0;
- }
- }
-
if (likely(msg->next < req->buf->i)) /* some unparsed data are available */
http_msg_analyzer(msg, &txn->hdr_idx);
}
@@ -5292,20 +5269,6 @@ void http_end_txn_clean_session(struct stream *s)
s->res.flags |= CF_NEVER_WAIT;
}
- /* if the request buffer is not empty, it means we're
- * about to process another request, so send pending
- * data with MSG_MORE to merge TCP packets when possible.
- * Just don't do this if the buffer is close to be full,
- * because the request will wait for it to flush a little
- * bit before proceeding.
- */
- if (s->req.buf->i) {
- if (s->res.buf->o &&
- !buffer_full(s->res.buf, global.tune.maxrewrite) &&
- bi_end(s->res.buf) <= s->res.buf->data + s->res.buf->size - global.tune.maxrewrite)
- s->res.flags |= CF_EXPECT_MORE;
- }
-
/* we're removing the analysers, we MUST re-enable events detection.
* We don't enable close on the response channel since it's either
* already closed, or in keep-alive with an idle connection handler.
@@ -5686,13 +5649,13 @@ int http_resync_states(struct stream *s)
* possibly killing the server connection and reinitialize
* a fresh-new transaction, but only once we're sure there's
* enough room in the request and response buffer to process
- * another request. The request buffer must not hold any
- * pending output data and the request buffer must not have
- * output data occupying the reserve.
+ * another request. They must not hold any pending output data
+ * and the response buffer must be realigned
+ * (realign is done in http_end_txn_clean_session).
*/
if (s->req.buf->o)
s->req.flags |= CF_WAKE_WRITE;
- else if (channel_congested(&s->res))
+ else if (s->res.buf->o)
s->res.flags |= CF_WAKE_WRITE;
else {
s->req.analysers = AN_REQ_FLT_END;
@@ -9043,6 +9006,9 @@ void http_reset_txn(struct stream *s)
if (unlikely(s->res.buf->i))
s->res.buf->i = 0;
+ /* Now we can realign the response buffer */
+ buffer_realign(s->res.buf);
+
s->req.rto = strm_fe(s)->timeout.client;
s->req.wto = TICK_ETERNITY;
diff --git a/src/stream.c b/src/stream.c
index 78cc7ff..bc8b3af 100644
--- a/src/stream.c
+++ b/src/stream.c
@@ -1834,18 +1834,6 @@ struct task *process_stream(struct task *t)
s->pending_events & TASK_WOKEN_MSG) {
unsigned int flags = res->flags;
- if ((res->flags & CF_MASK_ANALYSER) &&
- (res->analysers & AN_REQ_ALL)) {
- /* Due to HTTP pipelining, the HTTP request analyser might be waiting
- * for some free space in the response buffer, so we might need to call
- * it when something changes in the response buffer, but still we pass
- * it the request buffer. Note that the SI state might very well still
- * be zero due to us returning a flow of redirects!
- */
- res->analysers &= ~AN_REQ_ALL;
- req->flags |= CF_WAKE_ONCE;
- }
-
if (si_b->state >= SI_ST_EST) {
int max_loops = global.tune.maxpollevents;
unsigned int ana_list;
--
2.9.3
>From e1def7144d44bd745d4366c1cff1fed1b3280731 Mon Sep 17 00:00:00 2001
From: Christopher Faulet <[email protected]>
Date: Tue, 28 Mar 2017 11:52:37 +0200
Subject: [PATCH 2/4] CLEANUP: http: Remove channel_congested function
---
include/proto/channel.h | 21 ---------------------
1 file changed, 21 deletions(-)
diff --git a/include/proto/channel.h b/include/proto/channel.h
index 304a935..bcf8e39 100644
--- a/include/proto/channel.h
+++ b/include/proto/channel.h
@@ -159,27 +159,6 @@ static inline int channel_is_rewritable(const struct channel *chn)
return rem >= 0;
}
-/* Returns non-zero if the channel is congested with data in transit waiting
- * for leaving, indicating to the caller that it should wait for the reserve to
- * be released before starting to process new data in case it needs the ability
- * to append data. This is meant to be used while waiting for a clean response
- * buffer before processing a request.
- */
-static inline int channel_congested(const struct channel *chn)
-{
- if (!chn->buf->o)
- return 0;
-
- if (!channel_is_rewritable(chn))
- return 1;
-
- if (chn->buf->p + chn->buf->i >
- chn->buf->data + chn->buf->size - global.tune.maxrewrite)
- return 1;
-
- return 0;
-}
-
/* Tells whether data are likely to leave the buffer. This is used to know when
* we can safely ignore the reserve since we know we cannot retry a connection.
* It returns zero if data are blocked, non-zero otherwise.
--
2.9.3
>From 5b4bfdf2638323bebf3ecb31a22dbaf3d5c41679 Mon Sep 17 00:00:00 2001
From: Christopher Faulet <[email protected]>
Date: Tue, 28 Mar 2017 11:53:34 +0200
Subject: [PATCH 3/4] CLEANUP: buffers: Remove buffer_bounce_realign function
---
include/common/buffer.h | 1 -
src/buffer.c | 63 -------------------------------------------------
2 files changed, 64 deletions(-)
diff --git a/include/common/buffer.h b/include/common/buffer.h
index 3a6dfd7..578d4b1 100644
--- a/include/common/buffer.h
+++ b/include/common/buffer.h
@@ -57,7 +57,6 @@ int buffer_replace2(struct buffer *b, char *pos, char *end, const char *str, int
int buffer_insert_line2(struct buffer *b, char *pos, const char *str, int len);
void buffer_dump(FILE *o, struct buffer *b, int from, int to);
void buffer_slow_realign(struct buffer *buf);
-void buffer_bounce_realign(struct buffer *buf);
/*****************************************************************/
/* These functions are used to compute various buffer area sizes */
diff --git a/src/buffer.c b/src/buffer.c
index 4f8f647..3f3a198 100644
--- a/src/buffer.c
+++ b/src/buffer.c
@@ -176,69 +176,6 @@ void buffer_slow_realign(struct buffer *buf)
buf->p = buf->data;
}
-
-/* Realigns a possibly non-contiguous buffer by bouncing bytes from source to
- * destination. It does not use any intermediate buffer and does the move in
- * place, though it will be slower than a simple memmove() on contiguous data,
- * so it's desirable to use it only on non-contiguous buffers. No pointers are
- * changed, the caller is responsible for that.
- */
-void buffer_bounce_realign(struct buffer *buf)
-{
- int advance, to_move;
- char *from, *to;
-
- from = bo_ptr(buf);
- advance = buf->data + buf->size - from;
- if (!advance)
- return;
-
- to_move = buffer_len(buf);
- while (to_move) {
- char last, save;
-
- last = *from;
- to = from + advance;
- if (to >= buf->data + buf->size)
- to -= buf->size;
-
- while (1) {
- save = *to;
- *to = last;
- last = save;
- to_move--;
- if (!to_move)
- break;
-
- /* check if we went back home after rotating a number of bytes */
- if (to == from)
- break;
-
- /* if we ended up in the empty area, let's walk to next place. The
- * empty area is either between buf->r and from or before from or
- * after buf->r.
- */
- if (from > bi_end(buf)) {
- if (to >= bi_end(buf) && to < from)
- break;
- } else if (from < bi_end(buf)) {
- if (to < from || to >= bi_end(buf))
- break;
- }
-
- /* we have overwritten a byte of the original set, let's move it */
- to += advance;
- if (to >= buf->data + buf->size)
- to -= buf->size;
- }
-
- from++;
- if (from >= buf->data + buf->size)
- from -= buf->size;
- }
-}
-
-
/*
* Dumps part or all of a buffer.
*/
--
2.9.3
>From dc7c80ef2790a0c331222914482acd777a470b40 Mon Sep 17 00:00:00 2001
From: Christopher Faulet <[email protected]>
Date: Wed, 29 Mar 2017 10:49:49 +0200
Subject: [PATCH 4/4] CLEANUP: buffers: Remove buffer_contig_area and
buffer_work_area functions
---
include/common/buffer.h | 27 ---------------------------
1 file changed, 27 deletions(-)
diff --git a/include/common/buffer.h b/include/common/buffer.h
index 578d4b1..040a26e 100644
--- a/include/common/buffer.h
+++ b/include/common/buffer.h
@@ -249,18 +249,6 @@ static inline int buffer_total_space(const struct buffer *buf)
return buf->size - buffer_len(buf);
}
-/* Returns the number of contiguous bytes between <start> and <start>+<count>,
- * and enforces a limit on buf->data + buf->size. <start> must be within the
- * buffer.
- */
-static inline int buffer_contig_area(const struct buffer *buf, const char *start, int count)
-{
- if (count > buf->data - start + buf->size)
- count = buf->data - start + buf->size;
- return count;
-}
-
-
/* Returns the amount of byte that can be written starting from <p> into the
* input buffer at once, including reserved space which may be overwritten.
* This is used by Lua to insert data in the input side just before the other
@@ -321,21 +309,6 @@ static inline int buffer_pending(const struct buffer *buf)
return buf->i;
}
-/* Returns the size of the working area which the caller knows ends at <end>.
- * If <end> equals buf->r (modulo size), then it means that the free area which
- * follows is part of the working area. Otherwise, the working area stops at
- * <end>. It always starts at buf->p. The work area includes the
- * reserved area.
- */
-static inline int buffer_work_area(const struct buffer *buf, const char *end)
-{
- end = buffer_pointer(buf, end);
- if (end == buffer_wrap_add(buf, buf->p + buf->i))
- /* pointer exactly at end, lets push forwards */
- end = buffer_wrap_sub(buf, buf->p - buf->o);
- return buffer_count(buf, buf->p, end);
-}
-
/* Return 1 if the buffer has less than 1/4 of its capacity free, otherwise 0 */
static inline int buffer_almost_full(const struct buffer *buf)
{
--
2.9.3