Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package haproxy for openSUSE:Factory checked 
in at 2026-03-10 17:48:03
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/haproxy (Old)
 and      /work/SRC/openSUSE:Factory/.haproxy.new.8177 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "haproxy"

Tue Mar 10 17:48:03 2026 rev:178 rq:1337762 version:3.3.5+git0.f0a2d1bf5

Changes:
--------
--- /work/SRC/openSUSE:Factory/haproxy/haproxy.changes  2026-02-20 
17:44:19.191652878 +0100
+++ /work/SRC/openSUSE:Factory/.haproxy.new.8177/haproxy.changes        
2026-03-10 17:48:23.171344564 +0100
@@ -1,0 +2,49 @@
+Mon Mar 09 15:35:52 UTC 2026 - Marcus Rueckert <[email protected]>
+
+- Update to version 3.3.5+git0.f0a2d1bf5:
+  * [RELEASE] Released version 3.3.5
+  * SCRIPTS: git-show-backports: add a restart-from-last option
+  * SCRIPTS: git-show-backports: hide the common ancestor warning in quiet mode
+
+-------------------------------------------------------------------
+Mon Mar 09 14:13:21 UTC 2026 - Marcus Rueckert <[email protected]>
+
+- Update to version 3.3.4+git26.f6b8d4f18:
+  * BUG/MINOR: admin: haproxy-reload rename -vv long option
+  * BUG/MINOR: backend: Don't get proto to use for webscoket if there is no 
server
+  * BUG/MINOR: ssl-sample: Fix sample_conv_sha2() by checking EVP_Digest* 
failures
+
+-------------------------------------------------------------------
+Sat Mar 07 03:07:28 UTC 2026 - Marcus Rueckert <[email protected]>
+
+- Update to version 3.3.4+git23.55381fbd7:
+  * BUG/MINOR: stconn: Increase SC bytes_out value in se_done_ff()
+  * BUG/MEDIUM: hlua: Fix end of request detection when retrieving payload
+  * BUG/MINOR: hlua: Properly enable/disable line receives from HTTP applet
+  * BUG/MEDIUM: mux-fcgi: Use a safe loop to resume each stream eligible for 
sending
+  * BUG/MAJOR: resolvers: Properly lowered the names found in DNS response
+  * BUG/MAJOR: fcgi: Fix param decoding by properly checking its size
+  * BUG/MINOR: http-ana: Increment scf bytes_out value if an haproxy error is 
sent
+  * BUG/MINOR: sample: Fix sample to retrieve the number of bytes received and 
sent
+  * BUG/MINOR: channel: Increase the stconn bytes_in value in 
channel_add_input()
+  * DOC: config: Use the right alias for %B
+  * MINOR: filters: Set last_entity when a filter fails on stream_start 
callback
+  * DEBUG: stream: Display the currently running rule in stream dump
+  * BUG/MINOR: h1-htx: Be sure that H1 response version starts by "HTTP/"
+  * BUG/MEDIUM: qpack: correctly deal with too large decoded numbers
+  * BUG/MINOR: quic: fix OOB read in preferred_address transport parameter
+  * BUG/MINOR: qpack: fix 1-byte OOB read in qpack_decode_fs_pfx()
+  * BUG/MAJOR: qpack: unchecked length passed to huffman decoder
+  * BUG/MEDIUM: hpack: correctly deal with too large decoded numbers
+  * BUG/MEDIUM: stream: Handle TASK_WOKEN_RES as a stream event
+  * BUG/MINOR: promex: fix server iteration when last server is deleted
+
+-------------------------------------------------------------------
+Wed Feb 25 23:54:42 UTC 2026 - Marcus Rueckert <[email protected]>
+
+- Update to version 3.3.4+git3.a330a73f0:
+  * BUG/MEDIUM: mux-h2: make sure to always report pending errors to the stream
+  * MINOR: mux-h2: add a new setting, "tune.h2.log-errors" to tweak error 
logging
+  * MINOR: mux-h2: also count glitches on invalid trailers
+
+-------------------------------------------------------------------

Old:
----
  haproxy-3.3.4+git0.c2bffae0a.tar.gz

New:
----
  haproxy-3.3.5+git0.f0a2d1bf5.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ haproxy.spec ++++++
--- /var/tmp/diff_new_pack.Tb1NRh/_old  2026-03-10 17:48:24.379394432 +0100
+++ /var/tmp/diff_new_pack.Tb1NRh/_new  2026-03-10 17:48:24.383394597 +0100
@@ -49,7 +49,7 @@
 %bcond_with ech
 
 Name:           haproxy
-Version:        3.3.4+git0.c2bffae0a
+Version:        3.3.5+git0.f0a2d1bf5
 Release:        0
 #
 Summary:        The Reliable, High Performance TCP/HTTP Load Balancer

++++++ _service ++++++
--- /var/tmp/diff_new_pack.Tb1NRh/_old  2026-03-10 17:48:24.499399386 +0100
+++ /var/tmp/diff_new_pack.Tb1NRh/_new  2026-03-10 17:48:24.511399881 +0100
@@ -6,7 +6,10 @@
     <param name="versionformat">@PARENT_TAG@+git@TAG_OFFSET@.%h</param>
     <param name="versionrewrite-pattern">v(.*)</param>
     <param name="versionrewrite-replacement">\1</param>
-    <param name="revision">v3.3.4</param>
+    <param name="revision">v3.3.5</param>
+    <!--
+    <param name="revision">master</param>
+    -->
     <param name="changesgenerate">enable</param>
   </service>
 

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.Tb1NRh/_old  2026-03-10 17:48:24.559401862 +0100
+++ /var/tmp/diff_new_pack.Tb1NRh/_new  2026-03-10 17:48:24.567402193 +0100
@@ -5,7 +5,7 @@
   </service>
   <service name="tar_scm">
     <param name="url">http://git.haproxy.org/git/haproxy-3.3.git/</param>
-    <param 
name="changesrevision">c2bffae0a2a44f7562c6ebd455cd8d2e79001fff</param>
+    <param 
name="changesrevision">f0a2d1bf59e9be04eeff82e079afcfb401b9da5e</param>
   </service>
 </servicedata>
 (No newline at EOF)

++++++ haproxy-3.3.4+git0.c2bffae0a.tar.gz -> 
haproxy-3.3.5+git0.f0a2d1bf5.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/haproxy-3.3.4+git0.c2bffae0a/CHANGELOG 
new/haproxy-3.3.5+git0.f0a2d1bf5/CHANGELOG
--- old/haproxy-3.3.4+git0.c2bffae0a/CHANGELOG  2026-02-19 14:03:31.000000000 
+0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/CHANGELOG  2026-03-09 16:03:05.000000000 
+0100
@@ -1,6 +1,36 @@
 ChangeLog :
 ===========
 
+2026/03/09 : 3.3.5
+    - MINOR: mux-h2: also count glitches on invalid trailers
+    - MINOR: mux-h2: add a new setting, "tune.h2.log-errors" to tweak error 
logging
+    - BUG/MEDIUM: mux-h2: make sure to always report pending errors to the 
stream
+    - BUG/MINOR: promex: fix server iteration when last server is deleted
+    - BUG/MEDIUM: stream: Handle TASK_WOKEN_RES as a stream event
+    - BUG/MEDIUM: hpack: correctly deal with too large decoded numbers
+    - BUG/MAJOR: qpack: unchecked length passed to huffman decoder
+    - BUG/MINOR: qpack: fix 1-byte OOB read in qpack_decode_fs_pfx()
+    - BUG/MINOR: quic: fix OOB read in preferred_address transport parameter
+    - BUG/MEDIUM: qpack: correctly deal with too large decoded numbers
+    - BUG/MINOR: h1-htx: Be sure that H1 response version starts by "HTTP/"
+    - DEBUG: stream: Display the currently running rule in stream dump
+    - MINOR: filters: Set last_entity when a filter fails on stream_start 
callback
+    - DOC: config: Use the right alias for %B
+    - BUG/MINOR: channel: Increase the stconn bytes_in value in 
channel_add_input()
+    - BUG/MINOR: sample: Fix sample to retrieve the number of bytes received 
and sent
+    - BUG/MINOR: http-ana: Increment scf bytes_out value if an haproxy error 
is sent
+    - BUG/MAJOR: fcgi: Fix param decoding by properly checking its size
+    - BUG/MAJOR: resolvers: Properly lowered the names found in DNS response
+    - BUG/MEDIUM: mux-fcgi: Use a safe loop to resume each stream eligible for 
sending
+    - BUG/MINOR: hlua: Properly enable/disable line receives from HTTP applet
+    - BUG/MEDIUM: hlua: Fix end of request detection when retrieving payload
+    - BUG/MINOR: stconn: Increase SC bytes_out value in se_done_ff()
+    - BUG/MINOR: ssl-sample: Fix sample_conv_sha2() by checking EVP_Digest* 
failures
+    - BUG/MINOR: backend: Don't get proto to use for webscoket if there is no 
server
+    - BUG/MINOR: admin: haproxy-reload rename -vv long option
+    - SCRIPTS: git-show-backports: hide the common ancestor warning in quiet 
mode
+    - SCRIPTS: git-show-backports: add a restart-from-last option
+
 2026/02/19 : 3.3.4
     - DOC: internals: addd mworker V3 internals
     - DOC: proxy-proto: underline the packed attribute for struct pp2_tlv_ssl
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/haproxy-3.3.4+git0.c2bffae0a/VERDATE 
new/haproxy-3.3.5+git0.f0a2d1bf5/VERDATE
--- old/haproxy-3.3.4+git0.c2bffae0a/VERDATE    2026-02-19 14:03:31.000000000 
+0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/VERDATE    2026-03-09 16:03:05.000000000 
+0100
@@ -1,2 +1,2 @@
 $Format:%ci$
-2026/02/19
+2026/03/09
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/haproxy-3.3.4+git0.c2bffae0a/VERSION 
new/haproxy-3.3.5+git0.f0a2d1bf5/VERSION
--- old/haproxy-3.3.4+git0.c2bffae0a/VERSION    2026-02-19 14:03:31.000000000 
+0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/VERSION    2026-03-09 16:03:05.000000000 
+0100
@@ -1 +1 @@
-3.3.4
+3.3.5
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/haproxy-3.3.4+git0.c2bffae0a/addons/promex/service-prometheus.c 
new/haproxy-3.3.5+git0.f0a2d1bf5/addons/promex/service-prometheus.c
--- old/haproxy-3.3.4+git0.c2bffae0a/addons/promex/service-prometheus.c 
2026-02-19 14:03:31.000000000 +0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/addons/promex/service-prometheus.c 
2026-03-09 16:03:05.000000000 +0100
@@ -1223,9 +1223,6 @@
                if (promex_filter_metric(appctx, prefix, name))
                        continue;
 
-               if (!px)
-                       px = proxies_list;
-
                while (px) {
                        struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
                        enum promex_mt_type type;
@@ -1245,11 +1242,6 @@
                        if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || 
!(px->cap & PR_CAP_BE))
                                goto next_px;
 
-                       if (!sv) {
-                               watcher_attach(&ctx->srv_watch, px->srv);
-                               sv = px->srv;
-                       }
-
                        while (sv) {
                                labels[lb_idx].name  = ist("server");
                                labels[lb_idx].value = ist2(sv->id, 
strlen(sv->id));
@@ -1406,8 +1398,21 @@
                  next_px:
                        watcher_detach(&ctx->srv_watch);
                        px = px->next;
+                       if (px) {
+                               /* Update ctx.p[1] via watcher. */
+                               watcher_attach(&ctx->srv_watch, px->srv);
+                               sv = ctx->p[1];
+                       }
                }
                ctx->flags |= PROMEX_FL_METRIC_HDR;
+
+               /* Prepare a new iteration for the next stat column. */
+               px = proxies_list;
+               if (likely(px)) {
+                       /* Update ctx.p[1] via watcher. */
+                       watcher_attach(&ctx->srv_watch, px->srv);
+                       sv = ctx->p[1];
+               }
        }
 
        /* Skip extra counters */
@@ -1432,9 +1437,6 @@
                        if (promex_filter_metric(appctx, prefix, name))
                                continue;
 
-                       if (!px)
-                               px = proxies_list;
-
                        while (px) {
                                struct promex_label labels[PROMEX_MAX_LABELS-1] 
= {};
                                struct promex_metric metric;
@@ -1455,11 +1457,6 @@
                                if ((px->flags & PR_FL_DISABLED) || px->uuid <= 
0 || !(px->cap & PR_CAP_BE))
                                        goto next_px2;
 
-                               if (!sv) {
-                                       watcher_attach(&ctx->srv_watch, 
px->srv);
-                                       sv = px->srv;
-                               }
-
                                while (sv) {
                                        labels[lb_idx].name  = ist("server");
                                        labels[lb_idx].value = ist2(sv->id, 
strlen(sv->id));
@@ -1489,27 +1486,44 @@
                          next_px2:
                                watcher_detach(&ctx->srv_watch);
                                px = px->next;
+                               if (px) {
+                                       /* Update ctx.p[1] via watcher. */
+                                       watcher_attach(&ctx->srv_watch, 
px->srv);
+                                       sv = ctx->p[1];
+                               }
                        }
                        ctx->flags |= PROMEX_FL_METRIC_HDR;
+
+                       /* Prepare a new iteration for the next stat column. */
+                       px = proxies_list;
+                       if (likely(px)) {
+                               /* Update ctx.p[1] via watcher. */
+                               watcher_attach(&ctx->srv_watch, px->srv);
+                               sv = ctx->p[1];
+                       }
                }
 
                ctx->field_num += mod->stats_count;
                ctx->mod_field_num = 0;
        }
 
-       px = NULL;
-       sv = NULL;
-       mod = NULL;
-
   end:
+       if (ret) {
+               watcher_detach(&ctx->srv_watch);
+               px = NULL;
+               mod = NULL;
+       }
+
        if (out.len) {
                if (!htx_add_data_atonce(htx, out))
                        return -1; /* Unexpected and unrecoverable error */
        }
 
-       /* Save pointers (0=current proxy, 1=current server, 2=current stats 
module) of the current context */
+       /* Save pointers of the current context for dump resumption :
+        * 0=current proxy, 1=current server, 2=current stats module
+        * Note that p[1] is already automatically updated via srv_watch.
+        */
        ctx->p[0] = px;
-       ctx->p[1] = sv;
        ctx->p[2] = mod;
        return ret;
   full:
@@ -1756,6 +1770,14 @@
                        ctx->field_num = ST_I_PX_PXNAME;
                        ctx->mod_field_num = 0;
                        appctx->st1 = PROMEX_DUMPER_SRV;
+
+                       if (ctx->flags & PROMEX_FL_SCOPE_SERVER) {
+                               ctx->p[0] = proxies_list;
+                               if (likely(proxies_list)) {
+                                       /* Update ctx.p[1] via watcher. */
+                                       watcher_attach(&ctx->srv_watch, 
proxies_list->srv);
+                               }
+                       }
                        __fallthrough;
 
                case PROMEX_DUMPER_SRV:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/haproxy-3.3.4+git0.c2bffae0a/admin/cli/haproxy-reload 
new/haproxy-3.3.5+git0.f0a2d1bf5/admin/cli/haproxy-reload
--- old/haproxy-3.3.4+git0.c2bffae0a/admin/cli/haproxy-reload   2026-02-19 
14:03:31.000000000 +0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/admin/cli/haproxy-reload   2026-03-09 
16:03:05.000000000 +0100
@@ -57,7 +57,7 @@
        echo "  -t,  --timeout                Timeout (socat -t) (default: 
${TIMEOUT})"
        echo "  -s,  --silent                 Slient mode (no output)"
        echo "  -v,  --verbose                Verbose output (output from 
haproxy on failure)"
-       echo "  -vv                           Even more verbose output (output 
from haproxy on success and failure)"
+       echo "  -vv  --verbose=all            Very verbose output (output from 
haproxy on success and failure)"
        echo "  -h,  --help                   This help"
        echo ""
        echo "Examples:"
@@ -84,7 +84,7 @@
                                VERBOSE=2
                                shift
                                ;;
-                       -vv|--verbose)
+                       -vv|--verbose=all)
                                VERBOSE=3
                                shift
                                ;;
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/haproxy-3.3.4+git0.c2bffae0a/doc/configuration.txt 
new/haproxy-3.3.5+git0.f0a2d1bf5/doc/configuration.txt
--- old/haproxy-3.3.4+git0.c2bffae0a/doc/configuration.txt      2026-02-19 
14:03:31.000000000 +0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/doc/configuration.txt      2026-03-09 
16:03:05.000000000 +0100
@@ -3,7 +3,7 @@
                           Configuration Manual
                          ----------------------
                               version 3.3
-                              2026/02/19
+                              2026/03/09
 
 
 This document covers the configuration language as implemented in the version
@@ -4370,6 +4370,16 @@
   specific settings tune.h2.fe.initial-window-size and
   tune.h2.be.initial-window-size.
 
+tune.h2.log-errors { none | connection | stream }
+  Sets the level of errors in the H2 demultiplexer that will generate a log.
+  The default is "stream", which means that any decoding error encountered in
+  the demultiplexer will lead to the emission of a log. The "connection" value
+  indicates that only logs that result in invalidating the connection will
+  produce a log. Finally, "none" indicates that no decoding error will produce
+  any log. It is recommended to set at least "connection" in order to detect
+  protocol anomalies, even if this means temporarily switching to "none" during
+  difficult periods.
+
 tune.h2.max-concurrent-streams <number>
   Sets the default HTTP/2 maximum number of concurrent streams per connection
   (i.e. the number of outstanding requests on a single connection). This value
@@ -27974,7 +27984,7 @@
   |                          Others                                           |
   +---+------+------------------------------------------------------+---------+
   |   | %B   | bytes_read           (from server to client)         | numeric |
-  |   |      | %[req.bytes_in]                                      |         |
+  |   |      | %[res.bytes_in]                                      |         |
   +---+------+------------------------------------------------------+---------+
   | H | %CC  | captured_request_cookie                              | string  |
   +---+------+------------------------------------------------------+---------+
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/haproxy-3.3.4+git0.c2bffae0a/include/haproxy/channel.h 
new/haproxy-3.3.5+git0.f0a2d1bf5/include/haproxy/channel.h
--- old/haproxy-3.3.4+git0.c2bffae0a/include/haproxy/channel.h  2026-02-19 
14:03:31.000000000 +0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/include/haproxy/channel.h  2026-03-09 
16:03:05.000000000 +0100
@@ -376,6 +376,7 @@
                c_adv(chn, fwd);
        }
        /* notify that some data was read */
+       chn_prod(chn)->bytes_in += len;
        chn->flags |= CF_READ_EVENT;
 }
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/haproxy-3.3.4+git0.c2bffae0a/include/haproxy/stconn.h 
new/haproxy-3.3.5+git0.f0a2d1bf5/include/haproxy/stconn.h
--- old/haproxy-3.3.4+git0.c2bffae0a/include/haproxy/stconn.h   2026-02-19 
14:03:31.000000000 +0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/include/haproxy/stconn.h   2026-03-09 
16:03:05.000000000 +0100
@@ -568,7 +568,7 @@
                        }
                }
        }
-
+       se->sc->bytes_out += ret;
        return ret;
 }
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/haproxy-3.3.4+git0.c2bffae0a/include/haproxy/stream-t.h 
new/haproxy-3.3.5+git0.f0a2d1bf5/include/haproxy/stream-t.h
--- old/haproxy-3.3.4+git0.c2bffae0a/include/haproxy/stream-t.h 2026-02-19 
14:03:31.000000000 +0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/include/haproxy/stream-t.h 2026-03-09 
16:03:05.000000000 +0100
@@ -180,6 +180,7 @@
        STRM_EVT_SHUT_SRV_DOWN = 0x00000004, /* Must be shut because the 
selected server became available */
        STRM_EVT_SHUT_SRV_UP   = 0x00000008, /* Must be shut because a 
preferred server became available */
        STRM_EVT_KILLED        = 0x00000010, /* Must be shut for external 
reason */
+       STRM_EVT_RES           = 0x00000020, /* A requested resource is 
available (a buffer, a conn_slot...) */
 };
 
 /* This function is used to report flags in debugging tools. Please reflect
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/haproxy-3.3.4+git0.c2bffae0a/include/haproxy/stream.h 
new/haproxy-3.3.5+git0.f0a2d1bf5/include/haproxy/stream.h
--- old/haproxy-3.3.4+git0.c2bffae0a/include/haproxy/stream.h   2026-02-19 
14:03:31.000000000 +0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/include/haproxy/stream.h   2026-03-09 
16:03:05.000000000 +0100
@@ -412,6 +412,7 @@
 static inline unsigned int stream_map_task_state(unsigned int state)
 {
        return ((state & TASK_WOKEN_TIMER) ? STRM_EVT_TIMER : 0)         |
+               ((state & TASK_WOKEN_RES)  ? STRM_EVT_RES : 0)           |
                ((state & TASK_WOKEN_MSG)  ? STRM_EVT_MSG : 0)           |
                ((state & TASK_F_UEVT1)    ? STRM_EVT_SHUT_SRV_DOWN : 0) |
                ((state & TASK_F_UEVT3)    ? STRM_EVT_SHUT_SRV_UP : 0)   |
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/haproxy-3.3.4+git0.c2bffae0a/scripts/git-show-backports 
new/haproxy-3.3.5+git0.f0a2d1bf5/scripts/git-show-backports
--- old/haproxy-3.3.4+git0.c2bffae0a/scripts/git-show-backports 2026-02-19 
14:03:31.000000000 +0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/scripts/git-show-backports 2026-03-09 
16:03:05.000000000 +0100
@@ -28,7 +28,7 @@
 #   show-backports -q -m -r hapee-r2 hapee-r1
 
 
-USAGE="Usage: ${0##*/} [-q] [-H] [-m] [-u] [-r reference] [-l logexpr] [-s 
subject] [-b base] {branch|range} [...] [-- file*]"
+USAGE="Usage: ${0##*/} [-q] [-H] [-m] [-u] [-L] [-r reference] [-l logexpr] 
[-s subject] [-b base] {branch|range} [...] [-- file*]"
 BASES=( )
 BRANCHES=( )
 REF=
@@ -39,6 +39,7 @@
 MISSING=
 UPSTREAM=
 BODYHASH=
+SINCELAST=
 
 die() {
        [ "$#" -eq 0 ] || echo "$*" >&2
@@ -70,7 +71,7 @@
        count=0
        # now look up commits
        while read ref subject; do
-               if [ -n "$MISSING" -a "${subject:0:9}" = "[RELEASE]" ]; then
+               if [ -n "$MISSING" -o -n "$SINCELAST" ] && [ "${subject:0:9}" = 
"[RELEASE]" ]; then
                        continue
                fi
 
@@ -153,6 +154,7 @@
                -m)        MISSING=1      ; shift   ;;
                -u)        UPSTREAM=1     ; shift   ;;
                -H)        BODYHASH=1     ; shift   ;;
+               -L)        SINCELAST=1    ; shift   ;;
                -h|--help) quit "$USAGE" ;;
                *)         die  "$USAGE" ;;
        esac
@@ -255,7 +257,7 @@
 fi
 
 if [ -z "$BASE" ]; then
-       err "Warning! No base specified, looking for common ancestor."
+       [ "$QUIET" != "" ] || err "Warning! No base specified, looking for 
common ancestor."
        BASE=$(git merge-base --all "$REF" "${BRANCHES[@]}")
        if [ -z "$BASE" ]; then
                die "Couldn't find a common ancestor between these branches"
@@ -297,9 +299,23 @@
 (
        left_commits=( )
        right_commits=( )
+       since_last=( )
+       last_bkp=$BASE
        while read line; do
                # append the subject at the end of the line
                set -- $line
+               if [ -n "$SINCELAST" ]; then
+                       if [ "${line::1}" = ":" ]; then
+                               continue
+                       fi
+                       if [ "$2" != "-" ]; then
+                               last_bkp="$1"
+                               since_last=( )
+                       else
+                               since_last[${#since_last[@]}]="$1"
+                       fi
+                       continue
+               fi
                echo -n "$line "
                if [ "${line::1}" = ":" ]; then
                        echo "---- Subject ----"
@@ -315,7 +331,14 @@
                        right_commits[${#right_commits[@]}]="$comm"
                fi
        done
-       if [ -n "$MISSING" -a ${#left_commits[@]} -eq 0 ]; then
+       if [ -n "$SINCELAST" -a ${#since_last[@]} -eq 0 ]; then
+               echo "No new commit upstream since last commit $last_bkp."
+       elif [ -n "$SINCELAST" ]; then
+               echo "Found ${#since_last[@]} commit(s) added to branch $REF 
since last backported commit $last_bkp:"
+               echo
+               echo "   git cherry-pick -sx ${since_last[@]}"
+               echo
+       elif [ -n "$MISSING" -a ${#left_commits[@]} -eq 0 ]; then
                echo "No missing commit to apply."
        elif [ -n "$MISSING" ]; then
                echo
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/haproxy-3.3.4+git0.c2bffae0a/src/backend.c 
new/haproxy-3.3.5+git0.f0a2d1bf5/src/backend.c
--- old/haproxy-3.3.4+git0.c2bffae0a/src/backend.c      2026-02-19 
14:03:31.000000000 +0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/src/backend.c      2026-03-09 
16:03:05.000000000 +0100
@@ -2202,7 +2202,7 @@
         */
        if (may_start_mux_now) {
                const struct mux_ops *alt_mux =
-                 likely(!(s->flags & SF_WEBSOCKET)) ? NULL : 
srv_get_ws_proto(srv);
+                 likely(!(s->flags & SF_WEBSOCKET) || !srv) ? NULL : 
srv_get_ws_proto(srv);
                if (conn_install_mux_be(srv_conn, s->scb, s->sess, alt_mux) < 
0) {
                        conn_full_close(srv_conn);
                        return SF_ERR_INTERNAL;
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/haproxy-3.3.4+git0.c2bffae0a/src/fcgi.c 
new/haproxy-3.3.5+git0.f0a2d1bf5/src/fcgi.c
--- old/haproxy-3.3.4+git0.c2bffae0a/src/fcgi.c 2026-02-19 14:03:31.000000000 
+0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/src/fcgi.c 2026-03-09 16:03:05.000000000 
+0100
@@ -198,7 +198,7 @@
                len += 4;
        }
 
-       if (data < nlen + vlen)
+       if (data < o + nlen + vlen)
                return 0;
 
        p->n = ist2(b_peek(in, o), nlen);
@@ -253,7 +253,7 @@
                len += 4;
        }
 
-       if (data < nlen + vlen)
+       if (data < o + nlen + vlen)
                return 0;
 
        p->n = ist2(in->area + o, nlen);
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/haproxy-3.3.4+git0.c2bffae0a/src/filters.c 
new/haproxy-3.3.5+git0.f0a2d1bf5/src/filters.c
--- old/haproxy-3.3.4+git0.c2bffae0a/src/filters.c      2026-02-19 
14:03:31.000000000 +0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/src/filters.c      2026-03-09 
16:03:05.000000000 +0100
@@ -501,8 +501,11 @@
        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->stream_start) {
                        filter->calls++;
-                       if (FLT_OPS(filter)->stream_start(s, filter) < 0)
+                       if (FLT_OPS(filter)->stream_start(s, filter) < 0) {
+                               s->last_entity.type = STRM_ENTITY_FILTER;
+                               s->last_entity.ptr = filter;
                                return -1;
+                       }
                }
        }
        if (strm_li(s) && (strm_li(s)->bind_conf->analysers & 
AN_REQ_FLT_START_FE)) {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/haproxy-3.3.4+git0.c2bffae0a/src/h1_htx.c 
new/haproxy-3.3.5+git0.f0a2d1bf5/src/h1_htx.c
--- old/haproxy-3.3.4+git0.c2bffae0a/src/h1_htx.c       2026-02-19 
14:03:31.000000000 +0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/src/h1_htx.c       2026-03-09 
16:03:05.000000000 +0100
@@ -100,7 +100,7 @@
                if (sl->st.v.len != 8)
                        return 0;
 
-               if (*(sl->st.v.ptr + 4) != '/' ||
+               if (!istnmatch(sl->st.v, ist("HTTP/"), 5) ||
                    !isdigit((unsigned char)*(sl->st.v.ptr + 5)) ||
                    *(sl->st.v.ptr + 6) != '.' ||
                    !isdigit((unsigned char)*(sl->st.v.ptr + 7)))
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/haproxy-3.3.4+git0.c2bffae0a/src/hlua.c 
new/haproxy-3.3.5+git0.f0a2d1bf5/src/hlua.c
--- old/haproxy-3.3.4+git0.c2bffae0a/src/hlua.c 2026-02-19 14:03:31.000000000 
+0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/src/hlua.c 2026-03-09 16:03:05.000000000 
+0100
@@ -382,6 +382,7 @@
 
 /* Applet status flags */
 #define APPLET_DONE     0x01 /* applet processing is done. */
+#define APPLET_REQ_RECV 0x02 /* The request was fully received */
 /* unused: 0x02 */
 #define APPLET_HDR_SENT 0x04 /* Response header sent. */
 /* unused: 0x08, 0x10 */
@@ -5771,11 +5772,15 @@
 __LJMP static int hlua_applet_http_getline_yield(lua_State *L, int status, 
lua_KContext ctx)
 {
        struct hlua_appctx *luactx = MAY_LJMP(hlua_checkapplet_http(L, 1));
+       struct hlua_http_ctx *http_ctx = luactx->appctx->svcctx;
        struct buffer *inbuf = applet_get_inbuf(luactx->appctx);
        struct htx *htx;
        struct htx_blk *blk;
        int stop = 0;
 
+       if (http_ctx->flags & APPLET_REQ_RECV)
+               goto end;
+
        if (!inbuf)
                goto wait;
 
@@ -5824,8 +5829,10 @@
        /* The message was fully consumed and no more data are expected
         * (EOM flag set).
         */
-       if (htx_is_empty(htx) && (htx->flags & HTX_FL_EOM))
+       if (htx_is_empty(htx) && (htx->flags & HTX_FL_EOM)) {
+               http_ctx->flags |= APPLET_REQ_RECV;
                stop = 1;
+       }
 
        htx_to_buf(htx, inbuf);
        if (!stop) {
@@ -5834,6 +5841,10 @@
                MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_applet_http_getline_yield, 
TICK_ETERNITY, 0));
        }
 
+  end:
+       /* Stop to consume until the next receive or the end of the response */
+       applet_wont_consume(luactx->appctx);
+
        /* return the result. */
        luaL_pushresult(&luactx->b);
        return 1;
@@ -5845,6 +5856,9 @@
 {
        struct hlua_appctx *luactx = MAY_LJMP(hlua_checkapplet_http(L, 1));
 
+       /* Restart to consume - could have been disabled by a previous receive 
*/
+       applet_will_consume(luactx->appctx);
+
        /* Initialise the string catenation. */
        luaL_buffinit(L, &luactx->b);
 
@@ -5858,11 +5872,15 @@
 __LJMP static int hlua_applet_http_recv_yield(lua_State *L, int status, 
lua_KContext ctx)
 {
        struct hlua_appctx *luactx = MAY_LJMP(hlua_checkapplet_http(L, 1));
+       struct hlua_http_ctx *http_ctx = luactx->appctx->svcctx;
        struct buffer *inbuf = applet_get_inbuf(luactx->appctx);
        struct htx *htx;
        struct htx_blk *blk;
        int len;
 
+       if (http_ctx->flags & APPLET_REQ_RECV)
+               goto end;
+
        if (!inbuf)
                goto wait;
 
@@ -5910,8 +5928,10 @@
        /* The message was fully consumed and no more data are expected
         * (EOM flag set).
         */
-       if (htx_is_empty(htx) && (htx->flags & HTX_FL_EOM))
+       if (htx_is_empty(htx) && (htx->flags & HTX_FL_EOM)) {
+               http_ctx->flags |= APPLET_REQ_RECV;
                len = 0;
+       }
 
        htx_to_buf(htx, inbuf);
        applet_fl_clr(luactx->appctx, APPCTX_FL_INBLK_FULL);
@@ -5927,6 +5947,7 @@
                MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_applet_http_recv_yield, 
TICK_ETERNITY, 0));
        }
 
+  end:
        /* Stop to consume until the next receive or the end of the response */
        applet_wont_consume(luactx->appctx);
 
@@ -11503,6 +11524,9 @@
        if (!(strm->flags & SF_ERR_MASK))
                strm->flags |= SF_ERR_RESOURCE;
        http_ctx->flags |= APPLET_DONE;
+
+       /* Restart to consume to drain the request */
+       applet_will_consume(ctx);
        goto out;
 }
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/haproxy-3.3.4+git0.c2bffae0a/src/hpack-dec.c 
new/haproxy-3.3.5+git0.f0a2d1bf5/src/hpack-dec.c
--- old/haproxy-3.3.4+git0.c2bffae0a/src/hpack-dec.c    2026-02-19 
14:03:31.000000000 +0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/src/hpack-dec.c    2026-03-09 
16:03:05.000000000 +0100
@@ -50,13 +50,15 @@
 
 /* reads a varint from <raw>'s lowest <b> bits and <len> bytes max (raw 
included).
  * returns the 32-bit value on success after updating raw_in and len_in. Forces
- * len_in to (uint32_t)-1 on truncated input.
+ * len_in to (uint32_t)-1 on truncated input. The caller is responsible for
+ * providing a non-zero <len_in> on input.
  */
 static uint32_t get_var_int(const uint8_t **raw_in, uint32_t *len_in, int b)
 {
        uint32_t ret = 0;
        int len = *len_in;
        const uint8_t *raw = *raw_in;
+       uint32_t v, max = ~0;
        uint8_t shift = 0;
 
        len--;
@@ -64,23 +66,26 @@
        if (ret != (uint32_t)((1 << b) - 1))
                goto end;
 
-       while (len && (*raw & 128)) {
-               ret += ((uint32_t)(*raw++) & 127) << shift;
-               shift += 7;
+       do {
+               if (!len)
+                       goto too_short;
+               v = *raw++;
                len--;
-       }
-
-       /* last 7 bits */
-       if (!len)
-               goto too_short;
-       len--;
-       ret += ((uint32_t)(*raw++) & 127) << shift;
+               if (v & 127) { // make UBSan happy
+                       if ((v & 127) > max)
+                               goto too_large;
+                       ret += (v & 127) << shift;
+               }
+               max >>= 7;
+               shift += 7;
+       } while (v & 128);
 
  end:
        *raw_in = raw;
        *len_in = len;
        return ret;
 
+ too_large:
  too_short:
        *len_in = (uint32_t)-1;
        return 0;
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/haproxy-3.3.4+git0.c2bffae0a/src/http_ana.c 
new/haproxy-3.3.5+git0.f0a2d1bf5/src/http_ana.c
--- old/haproxy-3.3.4+git0.c2bffae0a/src/http_ana.c     2026-02-19 
14:03:31.000000000 +0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/src/http_ana.c     2026-03-09 
16:03:05.000000000 +0100
@@ -2817,7 +2817,6 @@
                int forced = s->flags & SF_RULE_FYIELD;
 
                rule = s->current_rule;
-               s->current_rule = NULL;
                s->flags &= ~SF_RULE_FYIELD;
                if (s->current_rule_list == rules || (def_rules && 
s->current_rule_list == def_rules)) {
                        if (forced)
@@ -2833,11 +2832,12 @@
 
        list_for_each_entry(rule, s->current_rule_list, list) {
  resume_rule:
+               s->current_rule = rule;
+
                /* check if budget is exceeded and we need to continue on the 
next
                 * polling loop, unless we know that we cannot yield
                 */
                if (s->rules_bcount++ >= global.tune.max_rules_at_once && 
!(act_opts & ACT_OPT_FINAL)) {
-                       s->current_rule = rule;
                        s->flags |= SF_RULE_FYIELD;
                        rule_ret = HTTP_RULE_RES_FYIELD;
                        task_wakeup(s->task, TASK_WOKEN_MSG);
@@ -2869,7 +2869,6 @@
                                        s->last_entity.ptr  = rule;
                                        goto end;
                                case ACT_RET_YIELD:
-                                       s->current_rule = rule;
                                        if (act_opts & ACT_OPT_FINAL) {
                                                send_log(s->be, LOG_WARNING,
                                                         "Internal error: 
action yields while it is  no long allowed "
@@ -2959,13 +2958,17 @@
 
        if (def_rules && s->current_rule_list == def_rules) {
                s->current_rule_list = rules;
+               s->current_rule = NULL;
                goto restart;
        }
 
   end:
        /* if the ruleset evaluation is finished reset the strict mode */
-       if (rule_ret != HTTP_RULE_RES_YIELD && rule_ret != HTTP_RULE_RES_FYIELD)
+       if (rule_ret != HTTP_RULE_RES_YIELD && rule_ret != 
HTTP_RULE_RES_FYIELD) {
+               s->current_rule_list = NULL;
+               s->current_rule = NULL;
                txn->req.flags &= ~HTTP_MSGF_SOFT_RW;
+       }
 
        /* we reached the end of the rules, nothing to report */
        return rule_ret;
@@ -3005,7 +3008,6 @@
                int forced = s->flags & SF_RULE_FYIELD;
 
                rule = s->current_rule;
-               s->current_rule = NULL;
                s->flags &= ~SF_RULE_FYIELD;
                if (s->current_rule_list == rules || (def_rules && 
s->current_rule_list == def_rules)) {
                        if (forced)
@@ -3022,11 +3024,12 @@
 
        list_for_each_entry(rule, s->current_rule_list, list) {
  resume_rule:
+               s->current_rule = rule;
+
                /* check if budget is exceeded and we need to continue on the 
next
                 * polling loop, unless we know that we cannot yield
                 */
                if (s->rules_bcount++ >= global.tune.max_rules_at_once && 
!(act_opts & ACT_OPT_FINAL)) {
-                       s->current_rule = rule;
                        s->flags |= SF_RULE_FYIELD;
                        rule_ret = HTTP_RULE_RES_FYIELD;
                        task_wakeup(s->task, TASK_WOKEN_MSG);
@@ -3058,7 +3061,6 @@
                                        s->last_entity.ptr  = rule;
                                        goto end;
                                case ACT_RET_YIELD:
-                                       s->current_rule = rule;
                                        if (act_opts & ACT_OPT_FINAL) {
                                                send_log(s->be, LOG_WARNING,
                                                         "Internal error: 
action yields while it is no long allowed "
@@ -3138,13 +3140,17 @@
 
        if (def_rules && s->current_rule_list == def_rules) {
                s->current_rule_list = rules;
+               s->current_rule = NULL;
                goto restart;
        }
 
   end:
        /* if the ruleset evaluation is finished reset the strict mode */
-       if (rule_ret != HTTP_RULE_RES_YIELD && rule_ret != HTTP_RULE_RES_FYIELD)
+       if (rule_ret != HTTP_RULE_RES_YIELD && rule_ret != 
HTTP_RULE_RES_FYIELD) {
+               s->current_rule_list = NULL;
+               s->current_rule = NULL;
                txn->rsp.flags &= ~HTTP_MSGF_SOFT_RW;
+       }
 
        /* we reached the end of the rules, nothing to report */
        return rule_ret;
@@ -4712,6 +4718,7 @@
        data = htx->data - co_data(res);
        c_adv(res, data);
        htx->first = -1;
+       s->scf->bytes_out += data;
        return 1;
 }
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/haproxy-3.3.4+git0.c2bffae0a/src/mux_fcgi.c 
new/haproxy-3.3.5+git0.f0a2d1bf5/src/mux_fcgi.c
--- old/haproxy-3.3.4+git0.c2bffae0a/src/mux_fcgi.c     2026-02-19 
14:03:31.000000000 +0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/src/mux_fcgi.c     2026-03-09 
16:03:05.000000000 +0100
@@ -2986,9 +2986,9 @@
         * for us.
         */
        if (!(fconn->flags & (FCGI_CF_MUX_MFULL | FCGI_CF_DEM_MROOM)) && 
fconn->state >= FCGI_CS_RECORD_H) {
-               struct fcgi_strm *fstrm;
+               struct fcgi_strm *fstrm, *fstrm_back;
 
-               list_for_each_entry(fstrm, &fconn->send_list, send_list) {
+               list_for_each_entry_safe(fstrm, fstrm_back, &fconn->send_list, 
send_list) {
                        if (fconn->state == FCGI_CS_CLOSED || fconn->flags & 
FCGI_CF_MUX_BLOCK_ANY)
                                break;
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/haproxy-3.3.4+git0.c2bffae0a/src/mux_h2.c 
new/haproxy-3.3.5+git0.f0a2d1bf5/src/mux_h2.c
--- old/haproxy-3.3.4+git0.c2bffae0a/src/mux_h2.c       2026-02-19 
14:03:31.000000000 +0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/src/mux_h2.c       2026-03-09 
16:03:05.000000000 +0100
@@ -466,6 +466,12 @@
 /* maximum amount of data we're OK with re-aligning for buffer optimizations */
 #define MAX_DATA_REALIGN 1024
 
+enum {
+       H2_ERR_LOG_ERR_NONE,
+       H2_ERR_LOG_ERR_CONN,
+       H2_ERR_LOG_ERR_STRM,
+};
+
 /* a few settings from the global section */
 static int h2_settings_header_table_size      =  4096; /* initial value */
 static int h2_settings_initial_window_size    =     0; /* default initial 
value: bufsize */
@@ -479,6 +485,7 @@
 static unsigned int h2_be_settings_max_concurrent_streams =   0; /* backend 
value */
 static unsigned int h2_fe_settings_max_concurrent_streams =   0; /* frontend 
value */
 static int h2_settings_max_frame_size         = 0;     /* unset */
+static int h2_settings_log_errors             = H2_ERR_LOG_ERR_STRM;
 
 /* other non-protocol settings */
 static unsigned int h2_fe_max_total_streams =   0;      /* frontend value */
@@ -842,6 +849,18 @@
        return !!htx_free_data_space(htx);
 }
 
+void h2_sess_log_conn(struct session *sess)
+{
+       if (h2_settings_log_errors >= H2_ERR_LOG_ERR_CONN)
+               sess_log(sess);
+}
+
+void h2_sess_log_strm(struct session *sess)
+{
+       if (h2_settings_log_errors >= H2_ERR_LOG_ERR_STRM)
+               sess_log(sess);
+}
+
 /* update h2c timeout if needed */
 static void h2c_update_timeout(struct h2c *h2c)
 {
@@ -2148,7 +2167,7 @@
  out_alloc:
        TRACE_ERROR("Failed to allocate a new stream", 
H2_EV_H2S_NEW|H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
  out:
-       sess_log(sess);
+       h2_sess_log_strm(sess);
        TRACE_LEAVE(H2_EV_H2S_NEW|H2_EV_H2S_ERR|H2_EV_H2S_END, h2c->conn);
        return NULL;
 }
@@ -2900,7 +2919,7 @@
        return 1;
  fail:
        if (!(h2c->flags & H2_CF_IS_BACK))
-               sess_log(h2c->conn->owner);
+               h2_sess_log_conn(h2c->conn->owner);
        h2c_error(h2c, error);
  out0:
        TRACE_DEVEL("leaving with missing data or error", 
H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
@@ -3483,7 +3502,7 @@
                        /* unrecoverable error ? */
                        if (h2c->st0 >= H2_CS_ERROR) {
                                TRACE_USER("Unrecoverable error decoding H2 
trailers", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, 
h2c->conn, 0, h2s_rxbuf_tail(h2s));
-                               sess_log(h2c->conn->owner);
+                               h2_sess_log_conn(h2c->conn->owner);
                                goto out;
                        }
 
@@ -3500,7 +3519,7 @@
                                /* Failed to decode this frame (e.g. too large 
request)
                                 * but the HPACK decompressor is still 
synchronized.
                                 */
-                               sess_log(h2c->conn->owner);
+                               h2_sess_log_strm(h2c->conn->owner);
                                h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
                                TRACE_USER("Stream error decoding H2 trailers", 
H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, 
h2s_rxbuf_tail(h2s));
                                h2c->st0 = H2_CS_FRAME_E;
@@ -3512,8 +3531,9 @@
                 * the data and send another RST.
                 */
                error = h2c_dec_hdrs(h2c, &rxbuf, &flags, &body_len, NULL);
-               sess_log(h2c->conn->owner);
+               h2_sess_log_strm(h2c->conn->owner);
                h2s = (struct h2s*)h2_error_stream;
+               h2c_report_glitch(h2c, 1, "rcvd H2 trailers on closed stream");
                TRACE_USER("rcvd H2 trailers on closed stream", 
H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, h2s, 
&rxbuf);
                goto send_rst;
        }
@@ -3523,7 +3543,7 @@
                h2c_report_glitch(h2c, 1, "HEADERS on invalid stream ID");
                TRACE_ERROR("HEADERS on invalid stream ID", 
H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
                HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
-               sess_log(h2c->conn->owner);
+               h2_sess_log_conn(h2c->conn->owner);
                session_inc_http_req_ctr(h2c->conn->owner);
                session_inc_http_err_ctr(h2c->conn->owner);
                goto conn_err;
@@ -3541,7 +3561,7 @@
                h2c_report_glitch(h2c, 1, "Stream limit violated");
                TRACE_STATE("Stream limit violated", H2_EV_STRM_SHUT, 
h2c->conn);
                HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
-               sess_log(h2c->conn->owner);
+               h2_sess_log_conn(h2c->conn->owner);
                session_inc_http_req_ctr(h2c->conn->owner);
                session_inc_http_err_ctr(h2c->conn->owner);
                goto conn_err;
@@ -3574,16 +3594,17 @@
                 * error code in the connection and counted it in the relevant
                 * stats. We still count a req error in both cases.
                 */
-               sess_log(h2c->conn->owner);
                session_inc_http_req_ctr(h2c->conn->owner);
                session_inc_http_err_ctr(h2c->conn->owner);
 
                if (h2c->st0 >= H2_CS_ERROR) {
+                       h2_sess_log_conn(h2c->conn->owner);
                        TRACE_USER("Unrecoverable error decoding H2 request", 
H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, 
&rxbuf);
                        goto out;
                }
 
                /* recoverable stream error (e.g. too large request) */
+               h2_sess_log_strm(h2c->conn->owner);
                TRACE_USER("rcvd unparsable H2 request", 
H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, h2s, 
&rxbuf);
                goto strm_err;
        }
@@ -3945,7 +3966,7 @@
                h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
                if (!h2c->nb_streams && !(h2c->flags & H2_CF_IS_BACK)) {
                        /* only log if no other stream can report the error */
-                       sess_log(h2c->conn->owner);
+                       h2_sess_log_conn(h2c->conn->owner);
                }
                HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
                TRACE_DEVEL("leaving in error (idle&!hdrs&!prio)", 
H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
@@ -4215,7 +4236,7 @@
                                        h2c->st0 = H2_CS_ERROR2;
                                        if (b_data(&h2c->dbuf) ||
                                            !(((const struct session 
*)h2c->conn->owner)->fe->options & (PR_O_NULLNOLOG|PR_O_IGNORE_PRB)))
-                                               sess_log(h2c->conn->owner);
+                                               
h2_sess_log_conn(h2c->conn->owner);
                                }
                                goto done;
                        }
@@ -4239,7 +4260,7 @@
                                        TRACE_ERROR("failed to receive 
settings", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS|H2_EV_PROTO_ERR, 
h2c->conn);
                                        h2c->st0 = H2_CS_ERROR2;
                                        if (!(h2c->flags & H2_CF_IS_BACK))
-                                               sess_log(h2c->conn->owner);
+                                               
h2_sess_log_conn(h2c->conn->owner);
                                }
                                goto done;
                        }
@@ -4251,7 +4272,7 @@
                                h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
                                h2c->st0 = H2_CS_ERROR2;
                                if (!(h2c->flags & H2_CF_IS_BACK))
-                                       sess_log(h2c->conn->owner);
+                                       h2_sess_log_conn(h2c->conn->owner);
                                
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
                                goto done;
                        }
@@ -4263,7 +4284,7 @@
                                h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
                                h2c->st0 = H2_CS_ERROR2;
                                if (!(h2c->flags & H2_CF_IS_BACK))
-                                       sess_log(h2c->conn->owner);
+                                       h2_sess_log_conn(h2c->conn->owner);
                                goto done;
                        }
 
@@ -4309,7 +4330,7 @@
                                h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
                                if (!h2c->nb_streams && !(h2c->flags & 
H2_CF_IS_BACK)) {
                                        /* only log if no other stream can 
report the error */
-                                       sess_log(h2c->conn->owner);
+                                       h2_sess_log_conn(h2c->conn->owner);
                                }
                                
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
                                break;
@@ -4339,7 +4360,7 @@
                                        TRACE_ERROR("invalid H2 padded frame 
length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
                                        h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
                                        if (!(h2c->flags & H2_CF_IS_BACK))
-                                               sess_log(h2c->conn->owner);
+                                               
h2_sess_log_conn(h2c->conn->owner);
                                        
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
                                        goto done;
                                }
@@ -4360,7 +4381,7 @@
                                         */
                                        h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
                                        if (!(h2c->flags & H2_CF_IS_BACK))
-                                               sess_log(h2c->conn->owner);
+                                               
h2_sess_log_conn(h2c->conn->owner);
                                        
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
                                        goto done;
                                }
@@ -4390,7 +4411,7 @@
                                TRACE_ERROR("received invalid H2 frame header", 
H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
                                h2c_error(h2c, ret);
                                if (!(h2c->flags & H2_CF_IS_BACK))
-                                       sess_log(h2c->conn->owner);
+                                       h2_sess_log_conn(h2c->conn->owner);
                                
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
                                goto done;
                        }
@@ -4483,7 +4504,7 @@
                        TRACE_ERROR("received unexpected H2 CONTINUATION 
frame", H2_EV_RX_FRAME|H2_EV_RX_CONT|H2_EV_H2C_ERR, h2c->conn, h2s);
                        h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
                        if (!(h2c->flags & H2_CF_IS_BACK))
-                               sess_log(h2c->conn->owner);
+                               h2_sess_log_conn(h2c->conn->owner);
                        HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
                        goto done;
 
@@ -6312,6 +6333,7 @@
 
        /* Trailers terminate a DATA sequence */
        if (h2_make_htx_trailers(list, htx) <= 0) {
+               h2c_report_glitch(h2c, 1, "failed to append HTX trailers into 
rxbuf");
                TRACE_STATE("failed to append HTX trailers into rxbuf", 
H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2S_ERR, h2c->conn);
                htx->flags |= HTX_FL_PARSING_ERROR;
                goto fail;
@@ -7862,7 +7884,13 @@
 
        /* tell the stream layer whether there are data left or not */
        if (h2s_rxbuf_cnt(h2s)) {
+               /* Note that parsing errors can also arrive here, we may need
+                * to propagate errors upstream otherwise no new activity will
+                * unblock them.
+                */
                se_fl_set(h2s->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
+               if (h2s_htx && h2s_htx->flags & HTX_FL_PARSING_ERROR)
+                       h2s_propagate_term_flags(h2c, h2s);
                BUG_ON_HOT(!buf->data);
        }
        else {
@@ -8647,6 +8675,31 @@
        return 0;
 }
 
+/* config parser for global "tune.h2.log-errors" */
+static int h2_parse_log_errors(char **args, int section_type, struct proxy 
*curpx,
+                                        const struct proxy *defpx, const char 
*file, int line,
+                                        char **err)
+{
+       int *vptr;
+
+       if (too_many_args(1, args, err, NULL))
+               return -1;
+
+       /* backend/frontend/default */
+       vptr = &h2_settings_log_errors;
+       if (strcmp(args[1], "none") == 0)
+               *vptr = H2_ERR_LOG_ERR_NONE;
+       else if (strcmp(args[1], "connection") == 0)
+               *vptr = H2_ERR_LOG_ERR_CONN;
+       else if (strcmp(args[1], "stream") == 0)
+               *vptr = H2_ERR_LOG_ERR_STRM;
+       else {
+               memprintf(err, "'%s' expects 'none', 'connection', or 
'stream'", args[0]);
+               return -1;
+       }
+       return 0;
+}
+
 /* config parser for global "tune.h2.{be.,fe.,}max-concurrent-streams" */
 static int h2_parse_max_concurrent_streams(char **args, int section_type, 
struct proxy *curpx,
                                            const struct proxy *defpx, const 
char *file, int line,
@@ -8797,6 +8850,7 @@
        { CFG_GLOBAL, "tune.h2.fe.rxbuf",               h2_parse_rxbuf          
        },
        { CFG_GLOBAL, "tune.h2.header-table-size",      
h2_parse_header_table_size      },
        { CFG_GLOBAL, "tune.h2.initial-window-size",    
h2_parse_initial_window_size    },
+       { CFG_GLOBAL, "tune.h2.log-errors",             h2_parse_log_errors     
        },
        { CFG_GLOBAL, "tune.h2.max-concurrent-streams", 
h2_parse_max_concurrent_streams },
        { CFG_GLOBAL, "tune.h2.max-frame-size",         h2_parse_max_frame_size 
        },
        { CFG_GLOBAL, "tune.h2.zero-copy-fwd-send",     
h2_parse_zero_copy_fwd_snd },
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/haproxy-3.3.4+git0.c2bffae0a/src/qpack-dec.c 
new/haproxy-3.3.5+git0.f0a2d1bf5/src/qpack-dec.c
--- old/haproxy-3.3.4+git0.c2bffae0a/src/qpack-dec.c    2026-02-19 
14:03:31.000000000 +0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/src/qpack-dec.c    2026-03-09 
16:03:05.000000000 +0100
@@ -63,31 +63,37 @@
        uint64_t ret = 0;
        int len = *len_in;
        const uint8_t *raw = *buf;
+       uint64_t v, max = ~0;
        uint8_t shift = 0;
 
+       if (len == 0)
+               goto too_short;
+
        len--;
        ret = *raw++ & ((1ULL << b) - 1);
        if (ret != (uint64_t)((1ULL << b) - 1))
                goto end;
 
-       while (len && (*raw & 128)) {
-               ret += ((uint64_t)*raw++ & 127) << shift;
-               shift += 7;
+       do {
+               if (!len)
+                       goto too_short;
+               v = *raw++;
                len--;
-       }
-
-       /* last 7 bits */
-       if (!len)
-               goto too_short;
-
-       len--;
-       ret += ((uint64_t)*raw++ & 127) << shift;
+               if (v & 127) { // make UBSan happy
+                       if ((v & 127) > max)
+                               goto too_large;
+                       ret += (v & 127) << shift;
+               }
+               max >>= 7;
+               shift += 7;
+       } while (v & 128);
 
  end:
        *buf = raw;
        *len_in = len;
        return ret;
 
+ too_large:
  too_short:
        *len_in = (uint64_t)-1;
        return 0;
@@ -220,6 +226,13 @@
        if (*len == (uint64_t)-1)
                return -QPACK_RET_RIC;
 
+       /* Ensure at least one byte remains for the sign bit
+        * and the start of the Delta Base varint.
+        */
+       if (!*len)
+               return -QPACK_RET_TRUNCATED;
+
+       /* Safe access to the sign bit thanks to the check above */
        *sign_bit = **raw & 0x8;
        *db = qpack_get_varint(raw, len, 7);
        if (*len == (uint64_t)-1)
@@ -419,6 +432,12 @@
                                qpack_debug_printf(stderr, "##ERR@%d\n", 
__LINE__);
                                ret = -QPACK_RET_TRUNCATED;
                                goto out;
+                       }
+
+                       if (len < length) {
+                               qpack_debug_printf(stderr, "##ERR@%d\n", 
__LINE__);
+                               ret = -QPACK_RET_TRUNCATED;
+                               goto out;
                        }
 
                        qpack_debug_printf(stderr, " h=%d length=%llu", !!h, 
(unsigned long long)length);
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/haproxy-3.3.4+git0.c2bffae0a/src/quic_tp.c 
new/haproxy-3.3.5+git0.f0a2d1bf5/src/quic_tp.c
--- old/haproxy-3.3.4+git0.c2bffae0a/src/quic_tp.c      2026-02-19 
14:03:31.000000000 +0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/src/quic_tp.c      2026-03-09 
16:03:05.000000000 +0100
@@ -168,7 +168,7 @@
 
        addr->cid.len = *(*buf)++;
        if (addr->cid.len) {
-               if (end - sizeof(addr->stateless_reset_token) - *buf > 
addr->cid.len ||
+               if (end - *buf < addr->cid.len + 
sizeof(addr->stateless_reset_token) ||
                    addr->cid.len > sizeof(addr->cid.data)) {
                        return 0;
                }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/haproxy-3.3.4+git0.c2bffae0a/src/resolvers.c 
new/haproxy-3.3.5+git0.f0a2d1bf5/src/resolvers.c
--- old/haproxy-3.3.4+git0.c2bffae0a/src/resolvers.c    2026-02-19 
14:03:31.000000000 +0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/src/resolvers.c    2026-03-09 
16:03:05.000000000 +0100
@@ -648,8 +648,9 @@
 
                /* +1 to take label len + label string */
                label_len++;
-
-               for (n = 0; n < label_len; n++) {
+               *dest = *reader; /* copy label len */
+               /* copy lowered label string */
+               for (n = 1; n < label_len; n++) {
                        dest[n] = tolower(reader[n]);
                }
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/haproxy-3.3.4+git0.c2bffae0a/src/sample.c 
new/haproxy-3.3.5+git0.f0a2d1bf5/src/sample.c
--- old/haproxy-3.3.4+git0.c2bffae0a/src/sample.c       2026-02-19 
14:03:31.000000000 +0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/src/sample.c       2026-03-09 
16:03:05.000000000 +0100
@@ -5446,7 +5446,7 @@
 
        if (kw[2] == 'q') /* req.bytes_in or req.bytes_out */
                smp->data.u.sint = (kw[10] == 'i') ? logs->req_in : 
logs->req_out;
-       if (kw[2] == 's') /* res.bytes_in or res.bytes_out */
+       else if (kw[2] == 's') /* res.bytes_in or res.bytes_out */
                smp->data.u.sint = (kw[10] == 'i') ? logs->res_in : 
logs->res_out;
        else /* bytes_in or bytes_out */
                smp->data.u.sint = (kw[6] == 'i') ? logs->req_in : logs->res_in;
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/haproxy-3.3.4+git0.c2bffae0a/src/ssl_sample.c 
new/haproxy-3.3.5+git0.f0a2d1bf5/src/ssl_sample.c
--- old/haproxy-3.3.4+git0.c2bffae0a/src/ssl_sample.c   2026-02-19 
14:03:31.000000000 +0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/src/ssl_sample.c   2026-03-09 
16:03:05.000000000 +0100
@@ -147,9 +147,14 @@
        mdctx = EVP_MD_CTX_new();
        if (!mdctx)
                return 0;
-       EVP_DigestInit_ex(mdctx, evp, NULL);
-       EVP_DigestUpdate(mdctx, smp->data.u.str.area, smp->data.u.str.data);
-       EVP_DigestFinal_ex(mdctx, (unsigned char*)trash->area, &digest_length);
+
+       if (!EVP_DigestInit_ex(mdctx, evp, NULL) ||
+           !EVP_DigestUpdate(mdctx, smp->data.u.str.area, 
smp->data.u.str.data) ||
+           !EVP_DigestFinal_ex(mdctx, (unsigned char*)trash->area, 
&digest_length)) {
+               EVP_MD_CTX_free(mdctx);
+               return 0;
+       }
+
        trash->data = digest_length;
 
        EVP_MD_CTX_free(mdctx);
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/haproxy-3.3.4+git0.c2bffae0a/src/stream.c 
new/haproxy-3.3.5+git0.f0a2d1bf5/src/stream.c
--- old/haproxy-3.3.4+git0.c2bffae0a/src/stream.c       2026-02-19 
14:03:31.000000000 +0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/src/stream.c       2026-03-09 
16:03:05.000000000 +0100
@@ -1090,10 +1090,8 @@
                        return ACT_RET_ERR;
 
                /* Finish initialisation of the context. */
-               s->current_rule = rule;
                if (appctx_init(appctx) == -1)
                        return ACT_RET_ERR;
-               s->current_rule = NULL;
        }
        else
                appctx = __sc_appctx(s->scb);
@@ -3766,7 +3764,9 @@
 
        if (strm->current_rule_list && strm->current_rule) {
                const struct act_rule *rule = strm->current_rule;
-               chunk_appendf(buf, "%s  current_rule=\"%s\" [%s:%d]\n", pfx, 
rule->kw->kw, rule->conf.file, rule->conf.line);
+               chunk_appendf(buf, "%s  current_rule=\"%s\" [%s:%d] (%s)\n",
+                             pfx, rule->kw->kw, rule->conf.file, 
rule->conf.line,
+                             (rule == strm->waiting_entity.ptr) ? "YIELDING" : 
"RUNNING");
        }
 }
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/haproxy-3.3.4+git0.c2bffae0a/src/tcp_rules.c 
new/haproxy-3.3.5+git0.f0a2d1bf5/src/tcp_rules.c
--- old/haproxy-3.3.4+git0.c2bffae0a/src/tcp_rules.c    2026-02-19 
14:03:31.000000000 +0100
+++ new/haproxy-3.3.5+git0.f0a2d1bf5/src/tcp_rules.c    2026-03-09 
16:03:05.000000000 +0100
@@ -155,11 +155,12 @@
   restart:
        list_for_each_entry(rule, s->current_rule_list, list) {
  resume_rule:
+               s->current_rule = rule;
+
                /* check if budget is exceeded and we need to continue on the 
next
                 * polling loop, unless we know that we cannot yield
                 */
                if (s->rules_bcount++ >= global.tune.max_rules_at_once && 
!(act_opts & ACT_OPT_FINAL)) {
-                       s->current_rule = rule;
                        s->flags |= SF_RULE_FYIELD;
                        task_wakeup(s->task, TASK_WOKEN_MSG);
                        goto missing_data;
@@ -169,8 +170,10 @@
                        enum acl_test_res ret = ACL_TEST_PASS;
 
                        ret = acl_exec_cond(rule->cond, s->be, sess, s, 
SMP_OPT_DIR_REQ | partial);
-                       if (ret == ACL_TEST_MISS)
+                       if (ret == ACL_TEST_MISS) {
+                               s->current_rule = NULL;
                                goto missing_data;
+                       }
 
                        ret = acl_pass(ret);
                        if (rule->cond->pol == ACL_COND_UNLESS)
@@ -193,7 +196,6 @@
                                        s->last_entity.ptr  = rule;
                                        goto end;
                                case ACT_RET_YIELD:
-                                       s->current_rule = rule;
                                        if (act_opts & ACT_OPT_FINAL) {
                                                send_log(s->be, LOG_WARNING,
                                                         "Internal error: yield 
not allowed if the inspect-delay expired "
@@ -241,6 +243,7 @@
 
        if (def_rules && s->current_rule_list == def_rules) {
                s->current_rule_list = rules;
+               s->current_rule = NULL;
                goto restart;
        }
 
@@ -354,7 +357,6 @@
                int forced = s->flags & SF_RULE_FYIELD;
 
                rule = s->current_rule;
-               s->current_rule = NULL;
                s->flags &= ~SF_RULE_FYIELD;
                if (!(rep->flags & SC_FL_ERROR) && !(rep->flags & 
(CF_READ_TIMEOUT|CF_WRITE_TIMEOUT))) {
                        s->waiting_entity.type = STRM_ENTITY_NONE;
@@ -371,11 +373,12 @@
   restart:
        list_for_each_entry(rule, s->current_rule_list, list) {
  resume_rule:
+               s->current_rule = rule;
+
                /* check if budget is exceeded and we need to continue on the 
next
                 * polling loop, unless we know that we cannot yield
                 */
                if (s->rules_bcount++ >= global.tune.max_rules_at_once && 
!(act_opts & ACT_OPT_FINAL)) {
-                       s->current_rule = rule;
                        s->flags |= SF_RULE_FYIELD;
                        task_wakeup(s->task, TASK_WOKEN_MSG);
                        goto missing_data;
@@ -385,8 +388,10 @@
                        enum acl_test_res ret = ACL_TEST_PASS;
 
                        ret = acl_exec_cond(rule->cond, s->be, sess, s, 
SMP_OPT_DIR_RES | partial);
-                       if (ret == ACL_TEST_MISS)
+                       if (ret == ACL_TEST_MISS) {
+                               s->current_rule = NULL;
                                goto missing_data;
+                       }
 
                        ret = acl_pass(ret);
                        if (rule->cond->pol == ACL_COND_UNLESS)
@@ -409,7 +414,6 @@
                                        s->last_entity.ptr  = rule;
                                        goto end;
                                case ACT_RET_YIELD:
-                                       s->current_rule = rule;
                                        if (act_opts & ACT_OPT_FINAL) {
                                                send_log(s->be, LOG_WARNING,
                                                         "Internal error: yield 
not allowed if the inspect-delay expired "
@@ -466,6 +470,7 @@
 
        if (def_rules && s->current_rule_list == def_rules) {
                s->current_rule_list = rules;
+               s->current_rule = NULL;
                goto restart;
        }
 

Reply via email to