[PATCH] DOC: Update UUID references to RFC 9562

2024-05-12 Thread Tim Duesterhus
When support for UUIDv7 was added in commit
aab6477b67415c4cc260bba5df359fa2e6f49733
the specification still was a draft.

It has since been published as RFC 9562.

This patch updates all UUID references from the obsoleted RFC 4122 and the
draft for RFC 9562 to the published RFC 9562.
---
 doc/configuration.txt | 2 +-
 src/sample.c  | 2 +-
 src/tools.c   | 6 +++---
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/doc/configuration.txt b/doc/configuration.txt
index f0168b0ea8..7732843427 100644
--- a/doc/configuration.txt
+++ b/doc/configuration.txt
@@ -21314,7 +21314,7 @@ txn.sess_term_state : string
   http-after-response set-status 429 if { txn.sess_term_state  "sQ" }
 
uuid([<version>]) : string
-  Returns a UUID following the RFC4122 standard. If the version is not
+  Returns a UUID following the RFC 9562 standard. If the version is not
   specified, a UUID version 4 (fully random) is returned.
 
   Versions 4 and 7 are supported.
diff --git a/src/sample.c b/src/sample.c
index 43ab003529..49017fe56b 100644
--- a/src/sample.c
+++ b/src/sample.c
@@ -4795,7 +4795,7 @@ static int smp_check_uuid(struct arg *args, char **err)
return 1;
 }
 
-// Generate a RFC4122 UUID (default is v4 = fully random)
+// Generate a RFC 9562 UUID (default is v4 = fully random)
 static int smp_fetch_uuid(const struct arg *args, struct sample *smp, const 
char *kw, void *private)
 {
long long int type = -1;
diff --git a/src/tools.c b/src/tools.c
index 61e0eaf6ff..1f67665237 100644
--- a/src/tools.c
+++ b/src/tools.c
@@ -5739,8 +5739,8 @@ void ha_random_jump96(uint32_t dist)
}
 }
 
-/* Generates an RFC4122 version 4 UUID into chunk <output> which must be at least 37
- * bytes large.
+/* Generates an RFC 9562 version 4 UUID into chunk
+ * <output> which must be at least 37 bytes large.
  */
 void ha_generate_uuid_v4(struct buffer *output)
 {
@@ -5763,7 +5763,7 @@ void ha_generate_uuid_v4(struct buffer *output)
 (long long)((rnd[2] >> 14u) | ((uint64_t) rnd[3] << 18u)) & 0xFFFFFFFFFFFFull);
 }
 
-/* Generates a draft-ietf-uuidrev-rfc4122bis-14 version 7 UUID into chunk
+/* Generates an RFC 9562 version 7 UUID into chunk
 * <output> which must be at least 37 bytes large.
  */
 void ha_generate_uuid_v7(struct buffer *output)
-- 
2.43.2




[PATCH 2/3] MINOR: Add `ha_generate_uuid_v7`

2024-04-19 Thread Tim Duesterhus
This function generates a version 7 UUID as per
draft-ietf-uuidrev-rfc4122bis-14.
---
 include/haproxy/tools.h |  1 +
 src/tools.c | 25 +
 2 files changed, 26 insertions(+)

diff --git a/include/haproxy/tools.h b/include/haproxy/tools.h
index cbce218dc6..4e1a6dfa3f 100644
--- a/include/haproxy/tools.h
+++ b/include/haproxy/tools.h
@@ -1057,6 +1057,7 @@ int parse_dotted_uints(const char *s, unsigned int 
**nums, size_t *sz);
 
 /* PRNG */
 void ha_generate_uuid_v4(struct buffer *output);
+void ha_generate_uuid_v7(struct buffer *output);
 void ha_random_seed(const unsigned char *seed, size_t len);
 void ha_random_jump96(uint32_t dist);
 uint64_t ha_random64(void);
diff --git a/src/tools.c b/src/tools.c
index f3b095569a..114cbf6ddf 100644
--- a/src/tools.c
+++ b/src/tools.c
@@ -5605,6 +5605,31 @@ void ha_generate_uuid_v4(struct buffer *output)
 (long long)((rnd[2] >> 14u) | ((uint64_t) rnd[3] << 18u)) & 0xFFFFFFFFFFFFull);
 }
 
+/* Generates a draft-ietf-uuidrev-rfc4122bis-14 version 7 UUID into chunk
+ * <output> which must be at least 37 bytes large.
+ */
+void ha_generate_uuid_v7(struct buffer *output)
+{
+   uint32_t rnd[3];
+   uint64_t last;
+   uint64_t time;
+
+   time = (date.tv_sec * 1000) + (date.tv_usec / 1000);
+   last = ha_random64();
+   rnd[0] = last;
+   rnd[1] = last >> 32;
+
+   last = ha_random64();
+   rnd[2] = last;
+
+   chunk_printf(output, "%8.8x-%4.4x-%4.4x-%4.4x-%12.12llx",
+                (uint)(time >> 16u),
+                (uint)(time & 0xFFFF),
+                ((rnd[0] >> 16u) & 0xFFF) | 0x7000,  // highest 4 bits indicate the uuid version
+                (rnd[1] & 0x3FFF) | 0x8000,  // the highest 2 bits indicate the UUID variant (10),
+                (long long)((rnd[1] >> 14u) | ((uint64_t) rnd[2] << 18u)) & 0xFFFFFFFFFFFFull);
+}
+
 
 /* only used by parse_line() below. It supports writing in place provided that
  *  is updated to the next location before calling it. In that case, the
-- 
2.43.2




[PATCH 3/3] MINOR: Add support for UUIDv7 to the `uuid` sample fetch

2024-04-19 Thread Tim Duesterhus
This adds support for UUIDv7 to the existing `uuid` sample fetch that was added
in 8a694b859cf98f8b0855b4aa5a50ebf64b501215.
---
 doc/configuration.txt |  3 ++-
 src/sample.c  | 40 +---
 2 files changed, 31 insertions(+), 12 deletions(-)

diff --git a/doc/configuration.txt b/doc/configuration.txt
index d2d654c191..16094c194a 100644
--- a/doc/configuration.txt
+++ b/doc/configuration.txt
@@ -21306,7 +21306,8 @@ txn.sess_term_state : string
uuid([<version>]) : string
   Returns a UUID following the RFC4122 standard. If the version is not
   specified, a UUID version 4 (fully random) is returned.
-  Currently, only version 4 is supported.
+
+  Versions 4 and 7 are supported.
 
 var([,]) : undefined
   Returns a variable with the stored type. If the variable is not set, the
diff --git a/src/sample.c b/src/sample.c
index 8daa92424d..43ab003529 100644
--- a/src/sample.c
+++ b/src/sample.c
@@ -4781,10 +4781,15 @@ static int smp_check_uuid(struct arg *args, char **err)
if (!args[0].type) {
args[0].type = ARGT_SINT;
args[0].data.sint = 4;
-   }
-   else if (args[0].data.sint != 4) {
-   memprintf(err, "Unsupported UUID version: '%lld'", 
args[0].data.sint);
-   return 0;
+   } else {
+   switch (args[0].data.sint) {
+   case 4:
+   case 7:
+   break;
+   default:
+   memprintf(err, "Unsupported UUID version: '%lld'", 
args[0].data.sint);
+   return 0;
+   }
}
 
return 1;
@@ -4793,16 +4798,29 @@ static int smp_check_uuid(struct arg *args, char **err)
 // Generate a RFC4122 UUID (default is v4 = fully random)
 static int smp_fetch_uuid(const struct arg *args, struct sample *smp, const 
char *kw, void *private)
 {
-   if (args[0].data.sint == 4 || !args[0].type) {
+   long long int type = -1;
+
+   if (!args[0].type) {
+   type = 4;
+   } else {
+   type = args[0].data.sint;
+   }
+
+   switch (type) {
+   case 4:
ha_generate_uuid_v4(&trash);
-   smp->data.type = SMP_T_STR;
-   smp->flags = SMP_F_VOL_TEST | SMP_F_MAY_CHANGE;
-   smp->data.u.str = trash;
-   return 1;
+   break;
+   case 7:
ha_generate_uuid_v7(&trash);
+   break;
+   default:
+   return 0;
}
 
-   // more implementations of other uuid formats possible here
-   return 0;
+   smp->data.type = SMP_T_STR;
+   smp->flags = SMP_F_VOL_TEST | SMP_F_MAY_CHANGE;
+   smp->data.u.str = trash;
+   return 1;
 }
 
 /* Check if QUIC support was compiled and was not disabled by "no-quic" global 
option */
-- 
2.43.2




[PATCH 0/3] Add support for UUIDv7

2024-04-19 Thread Tim Duesterhus
Willy,

as requested in the thread "[ANNOUNCE] haproxy-3.0-dev7":

> Regarding UUIDs, though, I've recently come across UUIDv7 which I found
> particularly interesting, and that I think would be nice to implement
> in the uuid() sample fetch function before 3.0 is released.

No reg-tests added, as those don't allow meaningfully testing that the
UUIDv7 is actually a UUIDv7. I have manually checked the output against
https://uuid7.com/.

Best regards

Tim Duesterhus (3):
  MINOR: tools: Rename `ha_generate_uuid` to `ha_generate_uuid_v4`
  MINOR: Add `ha_generate_uuid_v7`
  MINOR: Add support for UUIDv7 to the `uuid` sample fetch

 addons/ot/src/scope.c   |  2 +-
 doc/configuration.txt   |  3 ++-
 include/haproxy/tools.h |  3 ++-
 src/flt_spoe.c  |  2 +-
 src/sample.c| 42 +
 src/tools.c | 29 ++--
 6 files changed, 63 insertions(+), 18 deletions(-)

-- 
2.43.2




[PATCH 1/3] MINOR: tools: Rename `ha_generate_uuid` to `ha_generate_uuid_v4`

2024-04-19 Thread Tim Duesterhus
This is in preparation of adding support for other UUID versions.
---
 addons/ot/src/scope.c   | 2 +-
 include/haproxy/tools.h | 2 +-
 src/flt_spoe.c  | 2 +-
 src/sample.c| 2 +-
 src/tools.c | 4 ++--
 5 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/addons/ot/src/scope.c b/addons/ot/src/scope.c
index efe8fe29f6..8a4c02f3cf 100644
--- a/addons/ot/src/scope.c
+++ b/addons/ot/src/scope.c
@@ -113,7 +113,7 @@ struct flt_ot_runtime_context 
*flt_ot_runtime_context_init(struct stream *s, str
LIST_INIT(&(retptr->contexts));
 
uuid = b_make(retptr->uuid, sizeof(retptr->uuid), 0, 0);
-   ha_generate_uuid(&uuid);
+   ha_generate_uuid_v4(&uuid);
 
 #ifdef USE_OT_VARS
/*
diff --git a/include/haproxy/tools.h b/include/haproxy/tools.h
index 11082379c8..cbce218dc6 100644
--- a/include/haproxy/tools.h
+++ b/include/haproxy/tools.h
@@ -1056,7 +1056,7 @@ static inline void *my_realloc2(void *ptr, size_t size)
 int parse_dotted_uints(const char *s, unsigned int **nums, size_t *sz);
 
 /* PRNG */
-void ha_generate_uuid(struct buffer *output);
+void ha_generate_uuid_v4(struct buffer *output);
 void ha_random_seed(const unsigned char *seed, size_t len);
 void ha_random_jump96(uint32_t dist);
 uint64_t ha_random64(void);
diff --git a/src/flt_spoe.c b/src/flt_spoe.c
index 3fc058c283..b180ba2dd5 100644
--- a/src/flt_spoe.c
+++ b/src/flt_spoe.c
@@ -249,7 +249,7 @@ static const char 
*spoe_appctx_state_str[SPOE_APPCTX_ST_END+1] = {
 static char *
 generate_pseudo_uuid()
 {
-   ha_generate_uuid(&trash);
+   ha_generate_uuid_v4(&trash);
return my_strndup(trash.area, trash.data);
 }
 
diff --git a/src/sample.c b/src/sample.c
index 334782c173..8daa92424d 100644
--- a/src/sample.c
+++ b/src/sample.c
@@ -4794,7 +4794,7 @@ static int smp_check_uuid(struct arg *args, char **err)
 static int smp_fetch_uuid(const struct arg *args, struct sample *smp, const 
char *kw, void *private)
 {
if (args[0].data.sint == 4 || !args[0].type) {
-   ha_generate_uuid(&trash);
+   ha_generate_uuid_v4(&trash);
smp->data.type = SMP_T_STR;
smp->flags = SMP_F_VOL_TEST | SMP_F_MAY_CHANGE;
smp->data.u.str = trash;
diff --git a/src/tools.c b/src/tools.c
index 09de5db201..f3b095569a 100644
--- a/src/tools.c
+++ b/src/tools.c
@@ -5581,10 +5581,10 @@ void ha_random_jump96(uint32_t dist)
}
 }
 
-/* Generates an RFC4122 UUID into chunk  which must be at least 37
+/* Generates an RFC4122 version 4 UUID into chunk  which must be at 
least 37
  * bytes large.
  */
-void ha_generate_uuid(struct buffer *output)
+void ha_generate_uuid_v4(struct buffer *output)
 {
uint32_t rnd[4];
uint64_t last;
-- 
2.43.2




[PATCH] MINOR: systemd: Include MONOTONIC_USEC field in RELOADING=1 message

2024-04-03 Thread Tim Duesterhus
As per the `sd_notify` manual:

> A field carrying the monotonic timestamp (as per CLOCK_MONOTONIC) formatted
> in decimal in μs, when the notification message was generated by the client.
> This is typically used in combination with "RELOADING=1", to allow the
> service manager to properly synchronize reload cycles. See systemd.service(5)
> for details, specifically "Type=notify-reload".

Thus this change allows users with a recent systemd to switch to
`Type=notify-reload`, should they desire to do so. Correct behavior was
verified with a Fedora 39 VM.

see systemd/systemd#25916
---
 src/haproxy.c | 13 +++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/src/haproxy.c b/src/haproxy.c
index 0fcc3e5416..a5f1e79ef9 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -844,8 +844,17 @@ void mworker_reload(int hardreload)
}
 
 #if defined(USE_SYSTEMD)
-   if (global.tune.options & GTUNE_USE_SYSTEMD)
-   sd_notify(0, "RELOADING=1\nSTATUS=Reloading Configuration.\n");
+   if (global.tune.options & GTUNE_USE_SYSTEMD) {
+   struct timespec ts;
+
+   (void)clock_gettime(CLOCK_MONOTONIC, &ts);
+
+   sd_notifyf(0,
+  "RELOADING=1\n"
+  "STATUS=Reloading Configuration.\n"
+  "MONOTONIC_USEC=%" PRIu64 "\n",
+  (ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000ULL));
+   }
 #endif
mworker_reexec(hardreload);
 }
-- 
2.43.2




[PATCH 2/4] CLEANUP: Reapply strcmp.cocci (2)

2024-03-29 Thread Tim Duesterhus
This reapplies strcmp.cocci across the whole src/ tree.
---
 src/event_hdl.c | 2 +-
 src/hlua_fcn.c  | 8 
 src/sample.c| 2 +-
 src/tcp_act.c   | 4 ++--
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/event_hdl.c b/src/event_hdl.c
index f5bb5b6e7e..f4f7b19e4d 100644
--- a/src/event_hdl.c
+++ b/src/event_hdl.c
@@ -138,7 +138,7 @@ struct event_hdl_sub_type 
event_hdl_string_to_sub_type(const char *name)
int it;
 
for (it = 0; it < (int)(sizeof(event_hdl_sub_type_map) / 
sizeof(event_hdl_sub_type_map[0])); it++) {
-   if (!strcmp(name, event_hdl_sub_type_map[it].name))
+   if (strcmp(name, event_hdl_sub_type_map[it].name) == 0)
return event_hdl_sub_type_map[it].type;
}
return EVENT_HDL_SUB_NONE;
diff --git a/src/hlua_fcn.c b/src/hlua_fcn.c
index a13e0f5f41..7aaab3a381 100644
--- a/src/hlua_fcn.c
+++ b/src/hlua_fcn.c
@@ -1329,14 +1329,14 @@ static int hlua_server_index(struct lua_State *L)
 {
const char *key = lua_tostring(L, 2);
 
-   if (!strcmp(key, "name")) {
+   if (strcmp(key, "name") == 0) {
if (ONLY_ONCE())
ha_warning("hlua: use of server 'name' attribute is 
deprecated and will eventually be removed, please use get_name() function 
instead: %s\n", hlua_traceback(L, ", "));
lua_pushvalue(L, 1);
hlua_server_get_name(L);
return 1;
}
-   if (!strcmp(key, "puid")) {
+   if (strcmp(key, "puid") == 0) {
if (ONLY_ONCE())
ha_warning("hlua: use of server 'puid' attribute is 
deprecated and will eventually be removed, please use get_puid() function 
instead: %s\n", hlua_traceback(L, ", "));
lua_pushvalue(L, 1);
@@ -1980,14 +1980,14 @@ static int hlua_proxy_index(struct lua_State *L)
 {
const char *key = lua_tostring(L, 2);
 
-   if (!strcmp(key, "name")) {
+   if (strcmp(key, "name") == 0) {
if (ONLY_ONCE())
ha_warning("hlua: use of proxy 'name' attribute is 
deprecated and will eventually be removed, please use get_name() function 
instead: %s\n", hlua_traceback(L, ", "));
lua_pushvalue(L, 1);
hlua_proxy_get_name(L);
return 1;
}
-   if (!strcmp(key, "uuid")) {
+   if (strcmp(key, "uuid") == 0) {
if (ONLY_ONCE())
ha_warning("hlua: use of proxy 'uuid' attribute is 
deprecated and will eventually be removed, please use get_uuid() function 
instead: %s\n", hlua_traceback(L, ", "));
lua_pushvalue(L, 1);
diff --git a/src/sample.c b/src/sample.c
index cbb959161b..8f46d31b96 100644
--- a/src/sample.c
+++ b/src/sample.c
@@ -69,7 +69,7 @@ int type_to_smp(const char *type)
int it = 0;
 
while (it < SMP_TYPES) {
-   if (!strcmp(type, smp_to_type[it]))
+   if (strcmp(type, smp_to_type[it]) == 0)
break; // found
it += 1;
}
diff --git a/src/tcp_act.c b/src/tcp_act.c
index 1cefc90dcd..a88fab4afe 100644
--- a/src/tcp_act.c
+++ b/src/tcp_act.c
@@ -678,7 +678,7 @@ static enum act_parse_ret tcp_parse_set_mark(const char 
**args, int *orig_arg, s
}
 
/* Register processing function. */
-   if (!strcmp("set-bc-mark", args[cur_arg - 1]))
+   if (strcmp("set-bc-mark", args[cur_arg - 1]) == 0)
rule->action_ptr = tcp_action_set_bc_mark;
else
rule->action_ptr = tcp_action_set_fc_mark; // fc mark
@@ -740,7 +740,7 @@ static enum act_parse_ret tcp_parse_set_tos(const char 
**args, int *orig_arg, st
}
 
/* Register processing function. */
-   if (!strcmp("set-bc-tos", args[cur_arg - 1]))
+   if (strcmp("set-bc-tos", args[cur_arg - 1]) == 0)
rule->action_ptr = tcp_action_set_bc_tos;
else
rule->action_ptr = tcp_action_set_fc_tos; // fc tos
-- 
2.43.2




[PATCH 3/4] CLEANUP: Reapply xalloc_cast.cocci

2024-03-29 Thread Tim Duesterhus
This reapplies xalloc_cast.cocci across the whole src/ tree.
---
 src/cpuset.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/cpuset.c b/src/cpuset.c
index 82e350f132..a20b81a25d 100644
--- a/src/cpuset.c
+++ b/src/cpuset.c
@@ -280,7 +280,7 @@ int cpu_map_configured(void)
 static int cpuset_alloc(void)
 {
/* allocate the structures used to store CPU topology info */
-   cpu_map = (struct cpu_map*)calloc(MAX_TGROUPS, sizeof(*cpu_map));
+   cpu_map = calloc(MAX_TGROUPS, sizeof(*cpu_map));
if (!cpu_map)
return 0;
 
-- 
2.43.2




[PATCH 1/4] CLEANUP: Reapply ist.cocci (3)

2024-03-29 Thread Tim Duesterhus
This reapplies ist.cocci across the whole src/ tree.
---
 src/resolvers.c   | 4 ++--
 src/stick_table.c | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/resolvers.c b/src/resolvers.c
index f97fb29b01..d68208555f 100644
--- a/src/resolvers.c
+++ b/src/resolvers.c
@@ -3947,9 +3947,9 @@ static int rslv_promex_fill_ts(void *unused, void 
*metric_ctx, unsigned int id,
int ret;
 
labels[0].name  = ist("resolver");
-   labels[0].value = ist2(resolver->id, strlen(resolver->id));
+   labels[0].value = ist(resolver->id);
labels[1].name  = ist("nameserver");
-   labels[1].value = ist2(ns->id, strlen(ns->id));
+   labels[1].value = ist(ns->id);
 
ret = resolv_fill_stats(ns->counters, stats, );
if (ret == 1)
diff --git a/src/stick_table.c b/src/stick_table.c
index 964542cdea..2f75359291 100644
--- a/src/stick_table.c
+++ b/src/stick_table.c
@@ -5947,9 +5947,9 @@ static int stk_promex_fill_ts(void *unused, void 
*metric_ctx, unsigned int id, s
return 0;
 
labels[0].name  = ist("name");
-   labels[0].value = ist2(t->id, strlen(t->id));
+   labels[0].value = ist(t->id);
labels[1].name  = ist("type");
-   labels[1].value = ist2(stktable_types[t->type].kw, 
strlen(stktable_types[t->type].kw));
+   labels[1].value = ist(stktable_types[t->type].kw);
 
switch (id) {
case STICKTABLE_SIZE:
-- 
2.43.2




[PATCH 2/2] REGTESTS: Remove REQUIRE_VERSION=1.9 from all tests (2)

2024-03-29 Thread Tim Duesterhus
see also:

2a5fb62ad REGTESTS: Remove REQUIRE_VERSION=1.9 from all tests
---
 reg-tests/http-messaging/truncated.vtc | 1 -
 1 file changed, 1 deletion(-)

diff --git a/reg-tests/http-messaging/truncated.vtc 
b/reg-tests/http-messaging/truncated.vtc
index 7579f6d763..7f262d75dc 100644
--- a/reg-tests/http-messaging/truncated.vtc
+++ b/reg-tests/http-messaging/truncated.vtc
@@ -1,5 +1,4 @@
 varnishtest "HTTP response size tests: H2->H1 (HTX and legacy mode)"
-#REQUIRE_VERSION=1.9
 
 feature ignore_unknown_macro
 
-- 
2.43.2




[PATCH 1/2] REGTESTS: Do not use REQUIRE_VERSION for HAProxy 2.5+ (4)

2024-03-29 Thread Tim Duesterhus
Introduced in:

dfb1cea69 REGTESTS: promex: Adapt script to be less verbose
36d936dd1 REGTESTS: write a full reverse regtest
b57f15158 REGTESTS: provide a reverse-server test with name argument
f0bff2947 REGTESTS: provide a reverse-server test

see also:

fbbbc33df REGTESTS: Do not use REQUIRE_VERSION for HAProxy 2.5+
---
 reg-tests/connection/reverse_connect_full.vtc | 2 +-
 reg-tests/connection/reverse_server.vtc   | 2 +-
 reg-tests/connection/reverse_server_name.vtc  | 2 +-
 reg-tests/contrib/prometheus.vtc  | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/reg-tests/connection/reverse_connect_full.vtc 
b/reg-tests/connection/reverse_connect_full.vtc
index 238831fc38..cc88382ced 100644
--- a/reg-tests/connection/reverse_connect_full.vtc
+++ b/reg-tests/connection/reverse_connect_full.vtc
@@ -1,7 +1,7 @@
 varnishtest "Reverse connect full test"
 feature ignore_unknown_macro
 
-#REQUIRE_VERSION=2.9
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.9-dev0)'"
 
 server s1 {
 rxreq
diff --git a/reg-tests/connection/reverse_server.vtc 
b/reg-tests/connection/reverse_server.vtc
index 50fe8ceb80..5cd77ca7bb 100644
--- a/reg-tests/connection/reverse_server.vtc
+++ b/reg-tests/connection/reverse_server.vtc
@@ -1,7 +1,7 @@
 varnishtest "Reverse server test"
 feature ignore_unknown_macro
 
-#REQUIRE_VERSION=2.9
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.9-dev0)'"
 
 barrier b1 cond 2
 
diff --git a/reg-tests/connection/reverse_server_name.vtc 
b/reg-tests/connection/reverse_server_name.vtc
index 0fd850fe8f..3a24601743 100644
--- a/reg-tests/connection/reverse_server_name.vtc
+++ b/reg-tests/connection/reverse_server_name.vtc
@@ -2,7 +2,7 @@ varnishtest "Reverse server with a name parameter test"
 feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL)'"
 feature ignore_unknown_macro
 
-#REQUIRE_VERSION=2.9
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.9-dev0)'"
 
 barrier b1 cond 2
 
diff --git a/reg-tests/contrib/prometheus.vtc b/reg-tests/contrib/prometheus.vtc
index 60217c2a0e..89d65d7b74 100644
--- a/reg-tests/contrib/prometheus.vtc
+++ b/reg-tests/contrib/prometheus.vtc
@@ -1,6 +1,6 @@
 varnishtest "prometheus exporter test"
 
-#REQUIRE_VERSION=3.0
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(3.0-dev0)'"
 #REQUIRE_SERVICES=prometheus-exporter
 
 feature ignore_unknown_macro
-- 
2.43.2




[PATCH] CI: Update to actions/cache@v4

2024-02-08 Thread Tim Duesterhus
No functional change, but this upgrade is required, due to the v3 runtime being
deprecated:

> Node.js 16 actions are deprecated. Please update the following actions to use
> Node.js 20: actions/cache@v3. For more information see:
> https://github.blog/changelog/2023-09-22-github-actions-transitioning-from-node-16-to-node-20/.
---
 .github/workflows/aws-lc.yml | 2 +-
 .github/workflows/vtest.yml  | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/aws-lc.yml b/.github/workflows/aws-lc.yml
index 34a803026f..e59ba6 100644
--- a/.github/workflows/aws-lc.yml
+++ b/.github/workflows/aws-lc.yml
@@ -24,7 +24,7 @@ jobs:
   echo "result=$result" >> $GITHUB_OUTPUT
   - name: Cache AWS-LC
 id: cache_aws_lc
-uses: actions/cache@v3
+uses: actions/cache@v4
 with:
   path: '~/opt/'
   key: ssl-${{ steps.get_aws_lc_release.outputs.result 
}}-Ubuntu-latest-gcc
diff --git a/.github/workflows/vtest.yml b/.github/workflows/vtest.yml
index 6246718726..7b5254b04b 100644
--- a/.github/workflows/vtest.yml
+++ b/.github/workflows/vtest.yml
@@ -60,7 +60,7 @@ jobs:
 - name: Cache SSL libs
   if: ${{ matrix.ssl && matrix.ssl != 'stock' && matrix.ssl != 
'BORINGSSL=yes' && matrix.ssl != 'QUICTLS=yes' }}
   id: cache_ssl
-  uses: actions/cache@v3
+  uses: actions/cache@v4
   with:
 path: '~/opt/'
 key: ssl-${{ steps.generate-cache-key.outputs.key }}
@@ -68,7 +68,7 @@ jobs:
 - name: Cache OpenTracing
   if: ${{ contains(matrix.FLAGS, 'USE_OT=1') }}
   id: cache_ot
-  uses: actions/cache@v3
+  uses: actions/cache@v4
   with:
 path: '~/opt-ot/'
 key: ot-${{ matrix.CC }}-${{ env.OT_CPP_VERSION }}-${{ 
contains(matrix.name, 'ASAN') }}
-- 
2.43.0




[PATCH] DOC: config: add missing colon to "bytes_out" sample fetch keyword (2)

2023-11-30 Thread Tim Duesterhus
This reapplies 1eb049dc677f2de950158615ed3d8306ee5102d6, as the change was
accidentally reverted in 5ef48e063ecf992646c7af374153f106050fb8ec.
---
 doc/configuration.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/configuration.txt b/doc/configuration.txt
index 9d3d92d276..4c8ed10f45 100644
--- a/doc/configuration.txt
+++ b/doc/configuration.txt
@@ -22299,7 +22299,7 @@ baseq : string
 bytes_in : integer
   This returns the number of bytes uploaded from the client to the server.
 
-bytes_out integer
+bytes_out : integer
This is the number of bytes transmitted from the server to the client.
 
 capture.req.hdr() : string
-- 
2.42.0




[PATCH 1/3] REGTESTS: sample: Test the behavior of consecutive delimiters for the field converter

2023-11-30 Thread Tim Duesterhus
This is in preparation of a follow-up patch to fix the word converter.
---
 reg-tests/converter/field.vtc | 4 
 1 file changed, 4 insertions(+)

diff --git a/reg-tests/converter/field.vtc b/reg-tests/converter/field.vtc
index 1243728c01..3b1d8198e3 100644
--- a/reg-tests/converter/field.vtc
+++ b/reg-tests/converter/field.vtc
@@ -32,6 +32,10 @@ client c1 -connect ${h1_fe_sock} {
rxresp
expect resp.status == 200
expect resp.http.found == "bar"
+   txreq -url "/foo//bar/baz"
+   rxresp
+   expect resp.status == 200
+   expect resp.http.found == ""
txreq -url "/foo"
rxresp
expect resp.status == 200
-- 
2.42.0




[PATCH 2/3] BUG/MINOR: sample: Make the `word` converter compatible with `-m found`

2023-11-30 Thread Tim Duesterhus
Previously an expression like:

path,word(2,/) -m found

always returned `true`.

Bug exists since the `word` converter exists. That is:
c9a0f6d0232cf44d6b08d1964b9097a45a6c65f0

The same bug was previously fixed for the `field` converter in commit
4381d26edc03faa46401eb0fe82fd7be84be14fd.

The fix should be backported to 1.6+.
---
 reg-tests/converter/word.vtc | 43 
 src/sample.c |  2 +-
 2 files changed, 44 insertions(+), 1 deletion(-)
 create mode 100644 reg-tests/converter/word.vtc

diff --git a/reg-tests/converter/word.vtc b/reg-tests/converter/word.vtc
new file mode 100644
index 00..acd46781f5
--- /dev/null
+++ b/reg-tests/converter/word.vtc
@@ -0,0 +1,43 @@
+varnishtest "word converter Test"
+
+feature ignore_unknown_macro
+
+server s1 {
+   rxreq
+   txresp -hdr "Connection: close"
+} -repeat 3 -start
+
+haproxy h1 -conf {
+defaults
+   mode http
+   timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+   timeout client  "${HAPROXY_TEST_TIMEOUT-5s}"
+   timeout server  "${HAPROXY_TEST_TIMEOUT-5s}"
+
+frontend fe
+   bind "fd@${fe}"
+
+   #### requests
+   http-request set-var(txn.uri) path
+   http-response set-header Found %[var(txn.uri),word(2,/)] if { 
var(txn.uri),word(2,/) -m found }
+
+   default_backend be
+
+backend be
+   server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+   txreq -url "/foo/bar/baz"
+   rxresp
+   expect resp.status == 200
+   expect resp.http.found == "bar"
+   txreq -url "/foo//bar/baz"
+   rxresp
+   expect resp.status == 200
+   expect resp.http.found == "bar"
+   txreq -url "/foo"
+   rxresp
+   expect resp.status == 200
+   expect resp.http.found == ""
+} -run
diff --git a/src/sample.c b/src/sample.c
index c8954ac476..29967e07d9 100644
--- a/src/sample.c
+++ b/src/sample.c
@@ -2964,7 +2964,7 @@ static int sample_conv_word(const struct arg *arg_p, 
struct sample *smp, void *p
/* Field not found */
if (word != arg_p[0].data.sint) {
smp->data.u.str.data = 0;
-   return 1;
+   return 0;
}
 found:
smp->data.u.str.data = end - start;
-- 
2.42.0




[PATCH 3/3] DOC: Clarify the differences between field() and word()

2023-11-30 Thread Tim Duesterhus
word() mentions that delimiters at the start and end are ignored, but it does
not mention that consecutive delimiters are merged.

May be backported as far as the patch applies.
---
 doc/configuration.txt | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/doc/configuration.txt b/doc/configuration.txt
index 324cce1325..981e4dcb62 100644
--- a/doc/configuration.txt
+++ b/doc/configuration.txt
@@ -17726,6 +17726,7 @@ field(<index>,<delimiters>[,<count>])
   fields.
 
   Example :
+  str(f1_f2_f3__f5),field(4,_)    # <empty>
   str(f1_f2_f3__f5),field(5,_)# f5
   str(f1_f2_f3__f5),field(2,_,0)  # f2_f3__f5
   str(f1_f2_f3__f5),field(2,_,2)  # f2_f3
@@ -18939,17 +18940,21 @@ word(<index>,<delimiters>[,<count>])
   Extracts the nth word counting from the beginning (positive index) or from
   the end (negative index) considering given delimiters from an input string.
   Indexes start at 1 or -1 and delimiters are a string formatted list of chars.
-  Delimiters at the beginning or end of the input string are ignored.
+  Empty words are skipped. This means that delimiters at the start or end of
+  the input string are ignored and consecutive delimiters within the input
+  string are considered to be a single delimiter.
   Optionally you can specify <count> of words to extract (default: 1).
   Value of 0 indicates extraction of all remaining words.
 
   Example :
   str(f1_f2_f3__f5),word(4,_)# f5
+  str(f1_f2_f3__f5),word(5,_)    # <empty>
   str(f1_f2_f3__f5),word(2,_,0)  # f2_f3__f5
   str(f1_f2_f3__f5),word(3,_,2)  # f3__f5
   str(f1_f2_f3__f5),word(-2,_,3) # f1_f2_f3
   str(f1_f2_f3__f5),word(-3,_,0) # f1_f2
   str(/f1/f2/f3/f4),word(1,/)# f1
+  str(/f1//f2/f3/f4),word(2,/)   # f2
 
 wt6([])
   Hashes a binary input sample into an unsigned 32-bit quantity using the WT6
-- 
2.42.0




[PATCH] CLEANUP: Re-apply xalloc_size.cocci (3)

2023-11-05 Thread Tim Duesterhus
This reapplies the xalloc_size.cocci patch across the whole `src/` tree.

see 16cc16dd8235e7eb6c38b7abd210bd1e1d96b1d9
see 63ee0e4c01b94aee5fc6c6dd98cfc4480ae5ea46
see 9fb57e8c175a0b852b06a0780f48eb8eaf321a47
---
 src/log.c| 3 ++-
 src/proto_quic.c | 2 +-
 src/server.c | 4 ++--
 src/tcpcheck.c   | 2 +-
 4 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/src/log.c b/src/log.c
index 8ada05089..3458f93f9 100644
--- a/src/log.c
+++ b/src/log.c
@@ -895,7 +895,8 @@ static int postcheck_log_backend(struct proxy *be)
/* alloc srv array (it will be used for active and backup server lists 
in turn,
 * so we ensure that the longest list will fit
 */
-   be->lbprm.log.srv = calloc(MAX(be->srv_act, be->srv_bck), sizeof(struct 
server *));
+   be->lbprm.log.srv = calloc(MAX(be->srv_act, be->srv_bck),
+  sizeof(*be->lbprm.log.srv));
 
if (!be->lbprm.log.srv ) {
memprintf(, "memory error when allocating server array (%d 
entries)",
diff --git a/src/proto_quic.c b/src/proto_quic.c
index 701dcb295..899cffebf 100644
--- a/src/proto_quic.c
+++ b/src/proto_quic.c
@@ -760,7 +760,7 @@ static int quic_alloc_dghdlrs(void)
MT_LIST_INIT(&dghdlr->dgrams);
}
 
-   quic_cid_trees = calloc(QUIC_CID_TREES_CNT, sizeof(struct 
quic_cid_tree));
+   quic_cid_trees = calloc(QUIC_CID_TREES_CNT, sizeof(*quic_cid_trees));
if (!quic_cid_trees) {
ha_alert("Failed to allocate global CIDs trees.\n");
return 0;
diff --git a/src/server.c b/src/server.c
index 6e9e19564..ca48f2875 100644
--- a/src/server.c
+++ b/src/server.c
@@ -1363,7 +1363,7 @@ static int srv_parse_set_proxy_v2_tlv_fmt(char **args, 
int *cur_arg,
}
}
 
-   srv_tlv = malloc(sizeof(struct srv_pp_tlv_list));
+   srv_tlv = malloc(sizeof(*srv_tlv));
if (unlikely(!srv_tlv)) {
memprintf(err, "'%s' : failed to parse allocate TLV entry", 
args[*cur_arg]);
goto fail;
@@ -2516,7 +2516,7 @@ void srv_settings_cpy(struct server *srv, const struct 
server *src, int srv_tmpl
list_for_each_entry(srv_tlv, >pp_tlvs, list) {
if (srv_tlv == NULL)
break;
-   new_srv_tlv = malloc(sizeof(struct srv_pp_tlv_list));
+   new_srv_tlv = malloc(sizeof(*new_srv_tlv));
if (unlikely(!new_srv_tlv)) {
break;
}
diff --git a/src/tcpcheck.c b/src/tcpcheck.c
index c36b9d9a6..c00c47fed 100644
--- a/src/tcpcheck.c
+++ b/src/tcpcheck.c
@@ -4270,7 +4270,7 @@ int proxy_parse_smtpchk_opt(char **args, int cur_arg, 
struct proxy *curpx, const
(strcmp(args[cur_arg], "EHLO") == 0 || strcmp(args[cur_arg], 
"HELO") == 0)) {
/*  + space (1) +  + null byte (1) */
size_t len = strlen(args[cur_arg]) + 1 + 
strlen(args[cur_arg+1]) + 1;
-   cmd = calloc(len, 1);
+   cmd = calloc(1, len);
if (cmd)
snprintf(cmd, len, "%s %s", args[cur_arg], 
args[cur_arg+1]);
}
-- 
2.42.0




[PATCH] CI: Update to actions/checkout@v4

2023-09-06 Thread Tim Duesterhus
No functional change, but we should keep this current.

see 5f4ddb54b05ae0355b1f64c22263a6bc381410df
---
 .github/workflows/aws-lc.yml   | 2 +-
 .github/workflows/codespell.yml| 2 +-
 .github/workflows/compliance.yml   | 2 +-
 .github/workflows/contrib.yml  | 2 +-
 .github/workflows/coverity.yml | 2 +-
 .github/workflows/cross-zoo.yml| 2 +-
 .github/workflows/fedora-rawhide.yml   | 2 +-
 .github/workflows/musl.yml | 2 +-
 .github/workflows/openssl-nodeprecated.yml | 2 +-
 .github/workflows/vtest.yml| 4 ++--
 .github/workflows/windows.yml  | 2 +-
 11 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/.github/workflows/aws-lc.yml b/.github/workflows/aws-lc.yml
index d884e3e79..b50292bb1 100644
--- a/.github/workflows/aws-lc.yml
+++ b/.github/workflows/aws-lc.yml
@@ -12,7 +12,7 @@ jobs:
   test:
 runs-on: ubuntu-latest
 steps:
-  - uses: actions/checkout@v3
+  - uses: actions/checkout@v4
   - name: Install VTest
 run: |
   scripts/build-vtest.sh
diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
index bacd87670..781aa8332 100644
--- a/.github/workflows/codespell.yml
+++ b/.github/workflows/codespell.yml
@@ -11,7 +11,7 @@ jobs:
   codespell:
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
 - uses: codespell-project/codespell-problem-matcher@v1
 - uses: codespell-project/actions-codespell@master
   with:
diff --git a/.github/workflows/compliance.yml b/.github/workflows/compliance.yml
index 509eaf841..caf9624a8 100644
--- a/.github/workflows/compliance.yml
+++ b/.github/workflows/compliance.yml
@@ -19,7 +19,7 @@ jobs:
   CC: gcc
   os: ubuntu-latest
 steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
 - name: Install h2spec
   id: install-h2spec
   run: |
diff --git a/.github/workflows/contrib.yml b/.github/workflows/contrib.yml
index 99a1576d8..4e1474481 100644
--- a/.github/workflows/contrib.yml
+++ b/.github/workflows/contrib.yml
@@ -10,7 +10,7 @@ jobs:
   build:
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
 - name: Compile admin/halog/halog
   run: |
 make admin/halog/halog
diff --git a/.github/workflows/coverity.yml b/.github/workflows/coverity.yml
index ab4795e8c..0b3c4af91 100644
--- a/.github/workflows/coverity.yml
+++ b/.github/workflows/coverity.yml
@@ -17,7 +17,7 @@ jobs:
 runs-on: ubuntu-latest
 if: ${{ github.repository_owner == 'haproxy' }}
 steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
 - name: Install apt dependencies
   run: |
 sudo apt-get update
diff --git a/.github/workflows/cross-zoo.yml b/.github/workflows/cross-zoo.yml
index f2c8d7ad8..d9864e298 100644
--- a/.github/workflows/cross-zoo.yml
+++ b/.github/workflows/cross-zoo.yml
@@ -97,7 +97,7 @@ jobs:
 sudo apt-get -yq --force-yes install \
 gcc-${{ matrix.platform.arch }} \
 ${{ matrix.platform.libs }}
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
 
 
 - name: install quictls
diff --git a/.github/workflows/fedora-rawhide.yml 
b/.github/workflows/fedora-rawhide.yml
index aa3abc585..8f2578154 100644
--- a/.github/workflows/fedora-rawhide.yml
+++ b/.github/workflows/fedora-rawhide.yml
@@ -17,7 +17,7 @@ jobs:
 container:
   image: fedora:rawhide
 steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
 - name: Install dependencies
   run: |
 dnf -y install git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' 
perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils 
systemd-devel clang
diff --git a/.github/workflows/musl.yml b/.github/workflows/musl.yml
index 8eb8310fd..4017affb9 100644
--- a/.github/workflows/musl.yml
+++ b/.github/workflows/musl.yml
@@ -20,7 +20,7 @@ jobs:
 run: |
   ulimit -c unlimited
   echo '/tmp/core/core.%h.%e.%t' > /proc/sys/kernel/core_pattern
-  - uses: actions/checkout@v3
+  - uses: actions/checkout@v4
   - name: Install dependencies
 run: apk add gcc gdb make tar git python3 libc-dev linux-headers 
pcre-dev pcre2-dev openssl-dev lua5.3-dev grep socat curl musl-dbg lua5.3-dbg
   - name: Install VTest
diff --git a/.github/workflows/openssl-nodeprecated.yml 
b/.github/workflows/openssl-nodeprecated.yml
index e7f7ffaa5..a04c6cbfe 100644
--- a/.github/workflows/openssl-nodeprecated.yml
+++ b/.github/workflows/openssl-nodeprecated.yml
@@ -21,7 +21,7 @@ jobs:
   test:
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
 - name: Install VTest
   run: |
 scripts/build-vtest.sh
diff --git a/.github/workflows/vtest.yml 

[PATCH] REGTESTS: Do not use REQUIRE_VERSION for HAProxy 2.5+ (3)

2023-08-07 Thread Tim Duesterhus
Introduced in:

424981cde REGTEST: add ifnone-forwardfor test
b015b3eb1 REGTEST: add RFC7239 forwarded header tests

see also:

fbbbc33df REGTESTS: Do not use REQUIRE_VERSION for HAProxy 2.5+
---
 reg-tests/http-rules/forwarded-header-7239.vtc | 2 +-
 reg-tests/http-rules/ifnone-forwardfor.vtc | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/reg-tests/http-rules/forwarded-header-7239.vtc 
b/reg-tests/http-rules/forwarded-header-7239.vtc
index 57c9faa2b..a894113e7 100644
--- a/reg-tests/http-rules/forwarded-header-7239.vtc
+++ b/reg-tests/http-rules/forwarded-header-7239.vtc
@@ -1,5 +1,5 @@
 varnishtest "Test RFC 7239 forwarded header support (forwarded option and 
related converters)"
-#REQUIRE_VERSION=2.8
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.8-dev0)'"
 
 # This config tests the HTTP forwarded option and RFC7239 related converters.
 
diff --git a/reg-tests/http-rules/ifnone-forwardfor.vtc 
b/reg-tests/http-rules/ifnone-forwardfor.vtc
index 4be82ac2f..a743b10b4 100644
--- a/reg-tests/http-rules/ifnone-forwardfor.vtc
+++ b/reg-tests/http-rules/ifnone-forwardfor.vtc
@@ -1,5 +1,5 @@
 varnishtest "Test if-none param for the forwardfor option"
-#REQUIRE_VERSION=2.6
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.6-dev0)'"
 
 # This config tests the if-none parameter for the HTTP forwardfor option.
 
-- 
2.41.0




[PATCH] BUG/MINOR: Fix Lua's `get_stats` function

2023-06-01 Thread Tim Duesterhus
Lua's `get_stats` function stopped working in
4cfb0019e65bce79953164eddf54c1bbb61add62, due to the addition of a new field
ST_F_PROTO without a corresponding entry in `stat_fields`.

Fix the issue by adding the entry, like
a46b142e8807ea640e041d3a29e3fd427844d559 did previously for a different field.

This patch fixes GitHub Issue #2174, it should be backported to 2.8.
---
 src/stats.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/stats.c b/src/stats.c
index 68adde6c5..1d071e2c1 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -274,6 +274,7 @@ const struct name_desc stat_fields[ST_F_TOTAL_FIELDS] = {
[ST_F_H1REQ] = { .name = "h1req",   
.desc = "Total number of HTTP/1 sessions processed by this object since 
the worker process started" },
[ST_F_H2REQ] = { .name = "h2req",   
.desc = "Total number of hTTP/2 sessions processed by this object since 
the worker process started" },
[ST_F_H3REQ] = { .name = "h3req",   
.desc = "Total number of HTTP/3 sessions processed by this object since 
the worker process started" },
+   [ST_F_PROTO] = { .name = "proto",   
.desc = "Protocol" },
 };
 
 /* one line of info */
-- 
2.40.1




[PATCH] BUG/MINOR: ssl: Stop leaking `err` in ssl_sock_load_ocsp()

2023-03-19 Thread Tim Duesterhus
Previously performing a config check of `.github/h2spec.config` would report a
20 byte leak as reported in GitHub Issue #2082.

The leak was introduced in a6c0a59e9af65180c3ff591b91855bea8d19b352, which is
dev only. No backport needed.
---
 src/ssl_sock.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/ssl_sock.c b/src/ssl_sock.c
index 5993a72b06..801543cb77 100644
--- a/src/ssl_sock.c
+++ b/src/ssl_sock.c
@@ -1296,6 +1296,8 @@ static int ssl_sock_load_ocsp(const char *path, SSL_CTX 
*ctx, struct ckch_data *
if (warn)
free(warn);
 
+   free(err);
+
return ret;
 }
 
-- 
2.40.0




[PATCH 3/3] REGTESTS: Remove tests with REQUIRE_VERSION_BELOW=1.9

2023-01-04 Thread Tim Duesterhus
HAProxy 2.0 is the lowest supported version, thus this never matches.
---
 reg-tests/http-rules/h1_to_h1c.vtc | 186 -
 1 file changed, 186 deletions(-)
 delete mode 100644 reg-tests/http-rules/h1_to_h1c.vtc

diff --git a/reg-tests/http-rules/h1_to_h1c.vtc 
b/reg-tests/http-rules/h1_to_h1c.vtc
deleted file mode 100644
index 9ae73f70b9..0000000000
--- a/reg-tests/http-rules/h1_to_h1c.vtc
+++ /dev/null
@@ -1,186 +0,0 @@
-varnishtest "Composite HTTP manipulation test (H1 clear to H1 clear)"
-#REQUIRE_VERSION_BELOW=1.9
-
-# This config tests several http-request features and their interactions.
-# It extracts some samples, places them into variables, modifies some header
-# fields, appends multiple identical header fields, overwrites the start line
-# using several methods, then dumps the initial list of variables and the final
-# one, then applies CRC32 to these values as signatures that are easy to test.
-# Then it does it again in the backend after saving the current headers into
-# the same names prefixed by "fe-". Then it does the same on the response path.
-# If some modifications are performed, the crc values need to be adjusted based
-# on the failed logs.
-#
-# Run it with HAPROXY_PROGRAM=$PWD/haproxy varnishtest -l -k -t 1 "$1"
-
-feature ignore_unknown_macro
-
-server s1 {
-   rxreq
-   txresp \
- -status 234 \
- -hdr "hdr1: val1" \
- -hdr "hdr2:  val2a" \
- -hdr "hdr2:val2b" \
- -hdr "hdr3:  val3a, val3b" \
- -hdr "hdr4:" \
- -body "This is a body"
-
-   expect req.method == "GET"
-   expect req.http.fe-sl1-crc == 992395575
-   expect req.http.fe-sl2-crc == 1270056220
-   expect req.http.fe-hdr-crc == 1719311923
-   expect req.http.be-sl1-crc == 2604236007
-   expect req.http.be-sl2-crc == 4181358964
-   expect req.http.be-hdr-crc == 3634102538
-} -start
-
-haproxy h1 -conf {
-defaults
-   mode http
-   timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
-   timeout client  "${HAPROXY_TEST_TIMEOUT-5s}"
-   timeout server  "${HAPROXY_TEST_TIMEOUT-5s}"
-
-frontend fe
-   bind "fd@${fe}"
-
-    requests
-   http-request set-var(req.method) method
-   http-request set-var(req.uri)url
-   http-request set-var(req.path)   path
-   http-request set-var(req.query)  query
-   http-request set-var(req.param)  url_param(qs_arg)
-
-   http-request set-header sl1  "sl1: "
-
-   http-request set-method  "%[str(GET)]"
-   http-request set-uri concat(/bu/,req.uri,/eu)
-   http-request set-path"/bp/%[var(req.path)]/ep"
-   http-request set-query   "bq&%[var(req.query)]"
-
-   http-request set-header sl2  "sl2: "
-
-   http-request set-header sl1  "%[req.fhdr(sl1)] 
method=<%[var(req.method)]>; uri=<%[var(req.uri)]>; path=<%[var(req.path)]>;"
-   http-request set-header sl1  "%[req.fhdr(sl1)] 
query=<%[var(req.query)]>; param=<%[var(req.param)]>"
-   http-request set-header sl2  "%[req.fhdr(sl2)] 
method=<%[method]>; uri=<%[url]>; path=<%[path]>; "
-   http-request set-header sl2  "%[req.fhdr(sl2)] 
query=<%[query]>; param=<%[url_param(qs_arg)]>"
-   http-request set-header hdr  "%[req.fhdr(hdr)] 
hdr1=<%[req.hdr(hdr1)]>; fhdr1=<%[req.fhdr(hdr1)]>;"
-   http-request set-header hdr  "%[req.fhdr(hdr)] 
hdr2=<%[req.hdr(hdr2)]>; fhdr2=<%[req.fhdr(hdr2)]>;"
-   http-request set-header hdr  "%[req.fhdr(hdr)] 
hdr3=<%[req.hdr(hdr3)]>; fhdr3=<%[req.fhdr(hdr3)]>;"
-   http-request set-header hdr  "%[req.fhdr(hdr)] 
hdr4=<%[req.hdr(hdr4)]>; fhdr4=<%[req.fhdr(hdr4)]>;"
-
-   http-request set-header sl1-crc  "%[req.fhdr(sl1),crc32]"
-   http-request set-header sl2-crc  "%[req.fhdr(sl2),crc32]"
-   http-request set-header hdr-crc  "%[req.fhdr(hdr),crc32]"
-
-    responses
-   http-response set-headerbe-sl1   "%[res.fhdr(sl1)]"
-   http-response set-headerbe-sl2   "%[res.fhdr(sl2)]"
-   http-response set-headerbe-hdr   "%[res.fhdr(hdr)]"
-
-   http-response set-header  be-sl1-crc "%[res.fhdr(sl1-crc)]"
-   http-response set-header  be-sl2-crc "%[res.fhdr(sl2-crc)]"
-   http-response set-header  be-hdr-crc "%[res.fhdr(hdr-crc)]"
-
-   http-response set-var(res.status)status
-   http-response set-headersl1  "sl1: "
-
-   http-response set-status200
-
-   http-response set-headersl2  "sl2: "
-
-   http-response set-headersl1  "%[res.fhdr(sl1)] 
status=<%[var(res.status)]>;"
-   http-response set-headersl2  "%[res.fhdr(sl2)] 
status=<%[status]>;"
-   http-response set-headerhdr  "%[res.fhdr(hdr)] 
hdr1=<%[res.hdr(hdr1)]>; fhdr1=<%[res.fhdr(hdr1)]>;"
-   http-response set-headerhdr  

[PATCH 1/3] REGTESTS: Remove REQUIRE_VERSION=1.9 from all tests

2023-01-04 Thread Tim Duesterhus
HAProxy 2.0 is the lowest supported version, thus this always matches.

see 1b095cac9468d0c3eeb157e9b1a2947487bd3c83
---
 reg-tests/cache/basic.vtc  | 2 --
 reg-tests/cache/sample_fetches.vtc | 2 --
 reg-tests/compression/basic.vtc| 1 -
 reg-tests/compression/etags_conversion.vtc | 1 -
 reg-tests/compression/vary.vtc | 1 -
 reg-tests/http-messaging/h2_to_h1.vtc  | 1 -
 .../http-rules/converters_ipmask_concat_strcmp_field_word.vtc  | 3 ---
 reg-tests/http-rules/h1or2_to_h1c.vtc  | 1 -
 reg-tests/mcli/mcli_show_info.vtc  | 2 --
 9 files changed, 14 deletions(-)

diff --git a/reg-tests/cache/basic.vtc b/reg-tests/cache/basic.vtc
index d6a8c00b3b..377cbb3792 100644
--- a/reg-tests/cache/basic.vtc
+++ b/reg-tests/cache/basic.vtc
@@ -1,7 +1,5 @@
 varnishtest "Basic cache test"
 
-#REQUIRE_VERSION=1.9
-
 feature ignore_unknown_macro
 
 server s1 {
diff --git a/reg-tests/cache/sample_fetches.vtc 
b/reg-tests/cache/sample_fetches.vtc
index c2b1d15be0..c2b99c20ca 100644
--- a/reg-tests/cache/sample_fetches.vtc
+++ b/reg-tests/cache/sample_fetches.vtc
@@ -1,8 +1,6 @@
 
 varnishtest "Basic cache test"
 
-#REQUIRE_VERSION=1.9
-
 feature ignore_unknown_macro
 
 server s1 {
diff --git a/reg-tests/compression/basic.vtc b/reg-tests/compression/basic.vtc
index 76ad43dc37..5d9eadabde 100644
--- a/reg-tests/compression/basic.vtc
+++ b/reg-tests/compression/basic.vtc
@@ -1,6 +1,5 @@
 varnishtest "Basic compression test"
 
-#REQUIRE_VERSION=1.9
 #REQUIRE_OPTION=ZLIB|SLZ
 
 feature ignore_unknown_macro
diff --git a/reg-tests/compression/etags_conversion.vtc 
b/reg-tests/compression/etags_conversion.vtc
index c5684a20ff..96e34bcf9c 100644
--- a/reg-tests/compression/etags_conversion.vtc
+++ b/reg-tests/compression/etags_conversion.vtc
@@ -1,6 +1,5 @@
 varnishtest "Compression converts strong ETags to weak ETags"
 
-#REQUIRE_VERSION=1.9
 #REQUIRE_OPTION=ZLIB|SLZ
 
 feature ignore_unknown_macro
diff --git a/reg-tests/compression/vary.vtc b/reg-tests/compression/vary.vtc
index 8219c734e6..34d8976c09 100644
--- a/reg-tests/compression/vary.vtc
+++ b/reg-tests/compression/vary.vtc
@@ -1,6 +1,5 @@
 varnishtest "Compression sets Vary header"
 
-#REQUIRE_VERSION=1.9
 #REQUIRE_OPTION=ZLIB|SLZ
 
 feature ignore_unknown_macro
diff --git a/reg-tests/http-messaging/h2_to_h1.vtc 
b/reg-tests/http-messaging/h2_to_h1.vtc
index 852ee4caf9..91d1056d86 100644
--- a/reg-tests/http-messaging/h2_to_h1.vtc
+++ b/reg-tests/http-messaging/h2_to_h1.vtc
@@ -1,5 +1,4 @@
 varnishtest "HTTP request tests: H2 to H1 (HTX and legacy mode)"
-#REQUIRE_VERSION=1.9
 
 # Run it with HAPROXY_PROGRAM=$PWD/haproxy varnishtest -l -k -t 1 "$1"
 
diff --git 
a/reg-tests/http-rules/converters_ipmask_concat_strcmp_field_word.vtc 
b/reg-tests/http-rules/converters_ipmask_concat_strcmp_field_word.vtc
index e567e38780..00d74ce287 100644
--- a/reg-tests/http-rules/converters_ipmask_concat_strcmp_field_word.vtc
+++ b/reg-tests/http-rules/converters_ipmask_concat_strcmp_field_word.vtc
@@ -1,9 +1,6 @@
 varnishtest "Minimal tests for 1.9 converters: ipmask,concat,strcmp,field,word"
 feature ignore_unknown_macro
 
-# concat,strcmp,ipmask(ipv6mask) need 1.9
-#REQUIRE_VERSION=1.9
-
 # ipmask tests server
 server s1 {
rxreq
diff --git a/reg-tests/http-rules/h1or2_to_h1c.vtc 
b/reg-tests/http-rules/h1or2_to_h1c.vtc
index 9e7eb604a3..3423d162a1 100644
--- a/reg-tests/http-rules/h1or2_to_h1c.vtc
+++ b/reg-tests/http-rules/h1or2_to_h1c.vtc
@@ -1,5 +1,4 @@
 varnishtest "Composite HTTP manipulation test (H1 and H2 clear to H1 clear)"
-#REQUIRE_VERSION=1.9
 
 # This config tests several http-request features and their interactions.
 # It extracts some samples, places them into variables, modifies some header
diff --git a/reg-tests/mcli/mcli_show_info.vtc 
b/reg-tests/mcli/mcli_show_info.vtc
index ae533da3a8..3c44461a77 100644
--- a/reg-tests/mcli/mcli_show_info.vtc
+++ b/reg-tests/mcli/mcli_show_info.vtc
@@ -1,7 +1,5 @@
 varnishtest "Show info of process 1"
 
-#REQUIRE_VERSION=1.9
-
 feature ignore_unknown_macro
 
 # Do nothing. Is there only to create s1_* macros
-- 
2.39.0




[PATCH 2/3] REGTESTS: Remove REQUIRE_VERSION=2.0 from all tests

2023-01-04 Thread Tim Duesterhus
HAProxy 2.0 is the lowest supported version, thus this always matches.

see 1b095cac9468d0c3eeb157e9b1a2947487bd3c83
---
 reg-tests/balance/balance-uri.vtc  | 1 -
 reg-tests/checks/tcp-checks-socks4.vtc | 1 -
 reg-tests/http-rules/acl_cli_spaces.vtc| 2 --
 reg-tests/log/load_balancing.vtc   | 2 --
 reg-tests/mcli/mcli_start_progs.vtc| 1 -
 reg-tests/peers/basic_sync.vtc | 1 -
 reg-tests/peers/basic_sync_wo_stkt_backend.vtc | 1 -
 reg-tests/peers/tls_basic_sync.vtc | 1 -
 reg-tests/peers/tls_basic_sync_wo_stkt_backend.vtc | 1 -
 reg-tests/stickiness/lb-services.vtc   | 2 --
 reg-tests/stream/unique-id.vtc | 2 --
 11 files changed, 15 deletions(-)

diff --git a/reg-tests/balance/balance-uri.vtc 
b/reg-tests/balance/balance-uri.vtc
index cc65d64a36..e67883567b 100644
--- a/reg-tests/balance/balance-uri.vtc
+++ b/reg-tests/balance/balance-uri.vtc
@@ -1,6 +1,5 @@
 vtest "Test for balance URI"
 feature ignore_unknown_macro
-#REQUIRE_VERSION=2.0
 
 server s1 {
 rxreq
diff --git a/reg-tests/checks/tcp-checks-socks4.vtc 
b/reg-tests/checks/tcp-checks-socks4.vtc
index 04c23ec535..8a730f5424 100644
--- a/reg-tests/checks/tcp-checks-socks4.vtc
+++ b/reg-tests/checks/tcp-checks-socks4.vtc
@@ -1,5 +1,4 @@
 varnishtest "Health-checks: basic HTTP health-check though a socks4 proxy"
-#REQUIRE_VERSION=2.0
 #REGTEST_TYPE=slow
 feature ignore_unknown_macro
 
diff --git a/reg-tests/http-rules/acl_cli_spaces.vtc 
b/reg-tests/http-rules/acl_cli_spaces.vtc
index e61176a7a0..a554977486 100644
--- a/reg-tests/http-rules/acl_cli_spaces.vtc
+++ b/reg-tests/http-rules/acl_cli_spaces.vtc
@@ -1,8 +1,6 @@
 varnishtest "haproxy ACL, CLI and mCLI spaces"
 feature ignore_unknown_macro
 
-#REQUIRE_VERSION=2.0
-
 server s1 {
rxreq
expect req.method == "GET"
diff --git a/reg-tests/log/load_balancing.vtc b/reg-tests/log/load_balancing.vtc
index 22aacaefed..5c56e653dc 100644
--- a/reg-tests/log/load_balancing.vtc
+++ b/reg-tests/log/load_balancing.vtc
@@ -1,8 +1,6 @@
 varnishtest "Basic log load-balancing test"
 feature ignore_unknown_macro
 
-#REQUIRE_VERSION=2.0
-
 barrier b1 cond 2 -cyclic
 barrier b2 cond 2 -cyclic
 barrier b3 cond 2 -cyclic
diff --git a/reg-tests/mcli/mcli_start_progs.vtc 
b/reg-tests/mcli/mcli_start_progs.vtc
index a2e0f759e9..51b335ce5e 100644
--- a/reg-tests/mcli/mcli_start_progs.vtc
+++ b/reg-tests/mcli/mcli_start_progs.vtc
@@ -1,6 +1,5 @@
 varnishtest "Try to start a master CLI with 2 programs"
 #REGTEST_TYPE=bug
-#REQUIRE_VERSION=2.0
 feature cmd "command -v sleep"
 
 feature ignore_unknown_macro
diff --git a/reg-tests/peers/basic_sync.vtc b/reg-tests/peers/basic_sync.vtc
index 6c80419173..5c0cb4117c 100644
--- a/reg-tests/peers/basic_sync.vtc
+++ b/reg-tests/peers/basic_sync.vtc
@@ -1,7 +1,6 @@
 vtest "Basic test for peers protocol"
 feature ignore_unknown_macro
 
-#REQUIRE_VERSION=2.0
 #REGTEST_TYPE=slow
 
 haproxy h1 -arg "-L A" -conf {
diff --git a/reg-tests/peers/basic_sync_wo_stkt_backend.vtc 
b/reg-tests/peers/basic_sync_wo_stkt_backend.vtc
index 5353aab0e4..9f97ff54e5 100644
--- a/reg-tests/peers/basic_sync_wo_stkt_backend.vtc
+++ b/reg-tests/peers/basic_sync_wo_stkt_backend.vtc
@@ -1,7 +1,6 @@
 vtest "Basic test for peers protocol stick-table declared in peers sections"
 feature ignore_unknown_macro
 
-#REQUIRE_VERSION=2.0
 #REGTEST_TYPE=slow
 
 haproxy h1 -arg "-L A" -conf {
diff --git a/reg-tests/peers/tls_basic_sync.vtc 
b/reg-tests/peers/tls_basic_sync.vtc
index 52f2f170cc..95e3d736e3 100644
--- a/reg-tests/peers/tls_basic_sync.vtc
+++ b/reg-tests/peers/tls_basic_sync.vtc
@@ -1,6 +1,5 @@
 vtest "Basic test for peers protocol over SSL/TLS"
 #REQUIRE_OPTIONS=OPENSSL
-#REQUIRE_VERSION=2.0
 feature ignore_unknown_macro
 
 #REGTEST_TYPE=slow
diff --git a/reg-tests/peers/tls_basic_sync_wo_stkt_backend.vtc 
b/reg-tests/peers/tls_basic_sync_wo_stkt_backend.vtc
index cb5849c3bf..bf467084b8 100644
--- a/reg-tests/peers/tls_basic_sync_wo_stkt_backend.vtc
+++ b/reg-tests/peers/tls_basic_sync_wo_stkt_backend.vtc
@@ -1,7 +1,6 @@
 vtest "Basic test for peers protocol over SSL/TLS with stick-table declared in 
peers sections"
 feature ignore_unknown_macro
 
-#REQUIRE_VERSION=2.0
 #REQUIRE_OPTIONS=OPENSSL
 #REGTEST_TYPE=slow
 
diff --git a/reg-tests/stickiness/lb-services.vtc 
b/reg-tests/stickiness/lb-services.vtc
index a4e016f5dd..81da9c3f67 100644
--- a/reg-tests/stickiness/lb-services.vtc
+++ b/reg-tests/stickiness/lb-services.vtc
@@ -6,8 +6,6 @@ feature ignore_unknown_macro
 # do the job they are supposed to do.
 # If we remove one of the "stick on" rule, this script fails.
 
-#REQUIRE_VERSION=2.0
-
 server s_not_used_1 {}
 server s_not_used_2 {}
 server s_not_used_3 {}
diff --git a/reg-tests/stream/unique-id.vtc b/reg-tests/stream/unique-id.vtc
index 3cb5a705f5..0607b2a4fa 100644
--- a/reg-tests/stream/unique-id.vtc
+++ b/reg-tests/stream/unique-id.vtc
@@ -1,7 

[PATCH 2/6] CI: Add in-memory cache for the latest OpenSSL/LibreSSL

2022-12-29 Thread Tim Duesterhus
These functions were previously called once per compiler. Add the `lru_cache`
decorator to only perform one HTTP request each.
---
 .github/matrix.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.github/matrix.py b/.github/matrix.py
index 7cd04e88a6..3df259cd8c 100755
--- a/.github/matrix.py
+++ b/.github/matrix.py
@@ -8,6 +8,7 @@
 # as published by the Free Software Foundation; either version
 # 2 of the License, or (at your option) any later version.
 
+import functools
 import json
 import sys
 import urllib.request
@@ -25,6 +26,7 @@ print("Generating matrix for branch '{}'.".format(ref_name))
 def clean_ssl(ssl):
 return ssl.replace("_VERSION", "").lower()
 
+@functools.lru_cache(5)
 def determine_latest_openssl(ssl):
 headers = {'Authorization': 'token ' + environ.get('GITHUB_API_TOKEN')} if 
environ.get('GITHUB_API_TOKEN') else {}
 request = 
urllib.request.Request('https://api.github.com/repos/openssl/openssl/tags', 
headers=headers)
@@ -38,6 +40,7 @@ def determine_latest_openssl(ssl):
latest_tag = name
 return "OPENSSL_VERSION={}".format(latest_tag[8:])
 
+@functools.lru_cache(5)
 def determine_latest_libressl(ssl):
 libressl_download_list = 
urllib.request.urlopen("http://ftp.openbsd.org/pub/OpenBSD/LibreSSL/")
 for line in libressl_download_list.readlines():
-- 
2.39.0




[PATCH 6/6] CI: Reformat `matrix.py` using `black`

2022-12-29 Thread Tim Duesterhus
The initial version of matrix.py was formatted using `black` [1], but with all
the later changes, the formatting diverged quite a bit. This patch reformats
the script using black, fixing the indentation of some statements and
shortening overlong lines.

[1] https://github.com/psf/black
---
 .github/matrix.py | 40 +++-
 1 file changed, 23 insertions(+), 17 deletions(-)

diff --git a/.github/matrix.py b/.github/matrix.py
index c58bb7acd4..a0e90bc2db 100755
--- a/.github/matrix.py
+++ b/.github/matrix.py
@@ -10,9 +10,9 @@
 
 import functools
 import json
+import re
 import sys
 import urllib.request
-import re
 from os import environ
 
 if len(sys.argv) == 2:
@@ -23,35 +23,43 @@ else:
 
 print("Generating matrix for branch '{}'.".format(ref_name))
 
+
 def clean_ssl(ssl):
 return ssl.replace("_VERSION", "").lower()
 
+
 @functools.lru_cache(5)
 def determine_latest_openssl(ssl):
 headers = {}
-if environ.get('GITHUB_TOKEN') is not None:
-headers["Authorization"] = "token 
{}".format(environ.get('GITHUB_TOKEN'))
+if environ.get("GITHUB_TOKEN") is not None:
+headers["Authorization"] = "token 
{}".format(environ.get("GITHUB_TOKEN"))
 
-request = 
urllib.request.Request('https://api.github.com/repos/openssl/openssl/tags', 
headers=headers)
+request = urllib.request.Request(
+"https://api.github.com/repos/openssl/openssl/tags", headers=headers
+)
 openssl_tags = urllib.request.urlopen(request)
-tags = json.loads(openssl_tags.read().decode('utf-8'))
-latest_tag = ''
+tags = json.loads(openssl_tags.read().decode("utf-8"))
+latest_tag = ""
 for tag in tags:
-name = tag['name']
+name = tag["name"]
 if "openssl-" in name:
 if name > latest_tag:
-   latest_tag = name
+latest_tag = name
 return "OPENSSL_VERSION={}".format(latest_tag[8:])
 
+
 @functools.lru_cache(5)
 def determine_latest_libressl(ssl):
-libressl_download_list = 
urllib.request.urlopen("http://ftp.openbsd.org/pub/OpenBSD/LibreSSL/")
+libressl_download_list = urllib.request.urlopen(
+"http://ftp.openbsd.org/pub/OpenBSD/LibreSSL/"
+)
 for line in libressl_download_list.readlines():
 decoded_line = line.decode("utf-8")
 if "libressl-" in decoded_line and ".tar.gz.asc" in decoded_line:
- l = re.split("libressl-|.tar.gz.asc", decoded_line)[1]
+l = re.split("libressl-|.tar.gz.asc", decoded_line)[1]
 return "LIBRESSL_VERSION={}".format(l)
 
+
 def clean_compression(compression):
 return compression.replace("USE_", "").lower()
 
@@ -115,7 +123,7 @@ for CC in ["gcc", "clang"]:
 }
 )
 
-# ASAN
+# ASAN
 
 matrix.append(
 {
@@ -150,9 +158,7 @@ for CC in ["gcc", "clang"]:
 for compression in ["USE_ZLIB=1"]:
 matrix.append(
 {
-"name": "{}, {}, gz={}".format(
-os, CC, clean_compression(compression)
-),
+"name": "{}, {}, gz={}".format(os, CC, 
clean_compression(compression)),
 "os": os,
 "TARGET": TARGET,
 "CC": CC,
@@ -165,7 +171,7 @@ for CC in ["gcc", "clang"]:
 "OPENSSL_VERSION=1.0.2u",
 "OPENSSL_VERSION=1.1.1s",
 "QUICTLS=yes",
-#"BORINGSSL=yes",
+# "BORINGSSL=yes",
 ]
 
 if "haproxy-" not in ref_name:
@@ -220,6 +226,6 @@ for CC in ["clang"]:
 
 print(json.dumps(matrix, indent=4, sort_keys=True))
 
-if environ.get('GITHUB_OUTPUT') is not None:
-with open(environ.get('GITHUB_OUTPUT'), 'a') as f:
+if environ.get("GITHUB_OUTPUT") is not None:
+with open(environ.get("GITHUB_OUTPUT"), "a") as f:
 print("matrix={}".format(json.dumps({"include": matrix})), file=f)
-- 
2.39.0




[PATCH 1/6] CI: Improve headline in matrix.py

2022-12-29 Thread Tim Duesterhus
Since 4a04cd35ae89bf6a3bb7620f7a49804de3240ac4 (CI: github: split ssl lib
selection based on git branch) the branch, instead of the workflow type is
passed. The headline should reflect that.
---
 .github/matrix.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/matrix.py b/.github/matrix.py
index 72e6b1a852..7cd04e88a6 100755
--- a/.github/matrix.py
+++ b/.github/matrix.py
@@ -20,7 +20,7 @@ else:
 print("Usage: {} <ref_name>".format(sys.argv[0]), file=sys.stderr)
 sys.exit(1)
 
-print("Generating matrix for type '{}'.".format(ref_name))
+print("Generating matrix for branch '{}'.".format(ref_name))
 
 def clean_ssl(ssl):
 return ssl.replace("_VERSION", "").lower()
-- 
2.39.0




[PATCH 5/6] CI: Explicitly check environment variable against `None` in matrix.py

2022-12-29 Thread Tim Duesterhus
For consistency with `GITHUB_OUTPUT` at the bottom.
---
 .github/matrix.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/matrix.py b/.github/matrix.py
index e03453aa4d..c58bb7acd4 100755
--- a/.github/matrix.py
+++ b/.github/matrix.py
@@ -29,7 +29,7 @@ def clean_ssl(ssl):
 @functools.lru_cache(5)
 def determine_latest_openssl(ssl):
 headers = {}
-if environ.get('GITHUB_TOKEN'):
+if environ.get('GITHUB_TOKEN') is not None:
 headers["Authorization"] = "token 
{}".format(environ.get('GITHUB_TOKEN'))
 
 request = 
urllib.request.Request('https://api.github.com/repos/openssl/openssl/tags', 
headers=headers)
-- 
2.39.0




[PATCH 0/5] Changes to matrix.py

2022-12-29 Thread Tim Duesterhus
Willy,

please find some opinionated (formatting) changes to matrix.py that I believe
improve readability and maintainability of that script.

All of them may be backported if desired, but I did not add any such note to
the commit message. Also feel free to drop any patches you disagree with.

Best regards

Tim Duesterhus (6):
  CI: Improve headline in matrix.py
  CI: Add in-memory cache for the latest OpenSSL/LibreSSL
  CI: Use proper `if` blocks instead of conditional expressions in
matrix.py
  CI: Unify the `GITHUB_TOKEN` name across matrix.py and vtest.yml
  CI: Explicitly check environment variable against `None` in matrix.py
  CI: Reformat `matrix.py` using `black`

 .github/matrix.py   | 70 ++---
 .github/workflows/vtest.yml |  2 +-
 2 files changed, 50 insertions(+), 22 deletions(-)

-- 
2.39.0




[PATCH 4/6] CI: Unify the `GITHUB_TOKEN` name across matrix.py and vtest.yml

2022-12-29 Thread Tim Duesterhus
This makes naming a little clearer in matrix.py, because the name matches the
name of the actual secret.
---
 .github/matrix.py   | 4 ++--
 .github/workflows/vtest.yml | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/matrix.py b/.github/matrix.py
index ee44d48bc6..e03453aa4d 100755
--- a/.github/matrix.py
+++ b/.github/matrix.py
@@ -29,8 +29,8 @@ def clean_ssl(ssl):
 @functools.lru_cache(5)
 def determine_latest_openssl(ssl):
 headers = {}
-if environ.get('GITHUB_API_TOKEN'):
-headers["Authorization"] = "token 
{}".format(environ.get('GITHUB_API_TOKEN'))
+if environ.get('GITHUB_TOKEN'):
+headers["Authorization"] = "token 
{}".format(environ.get('GITHUB_TOKEN'))
 
 request = 
urllib.request.Request('https://api.github.com/repos/openssl/openssl/tags', 
headers=headers)
 openssl_tags = urllib.request.urlopen(request)
diff --git a/.github/workflows/vtest.yml b/.github/workflows/vtest.yml
index f26415a205..5137099de8 100644
--- a/.github/workflows/vtest.yml
+++ b/.github/workflows/vtest.yml
@@ -26,7 +26,7 @@ jobs:
   - uses: actions/checkout@v3
   - name: Generate Build Matrix
 env:
-  GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 id: set-matrix
 run: python3 .github/matrix.py "${{ github.ref_name }}"
 
-- 
2.39.0




[PATCH 3/6] CI: Use proper `if` blocks instead of conditional expressions in matrix.py

2022-12-29 Thread Tim Duesterhus
For complex expressions, such as the ones modified, the condition expression is
much less readable, especially with the actual condition in the middle of the
"then" and "else" part.
---
 .github/matrix.py | 29 -
 1 file changed, 24 insertions(+), 5 deletions(-)

diff --git a/.github/matrix.py b/.github/matrix.py
index 3df259cd8c..ee44d48bc6 100755
--- a/.github/matrix.py
+++ b/.github/matrix.py
@@ -28,7 +28,10 @@ def clean_ssl(ssl):
 
 @functools.lru_cache(5)
 def determine_latest_openssl(ssl):
-headers = {'Authorization': 'token ' + environ.get('GITHUB_API_TOKEN')} if 
environ.get('GITHUB_API_TOKEN') else {}
+headers = {}
+if environ.get('GITHUB_API_TOKEN'):
+headers["Authorization"] = "token 
{}".format(environ.get('GITHUB_API_TOKEN'))
+
 request = 
urllib.request.Request('https://api.github.com/repos/openssl/openssl/tags', 
headers=headers)
 openssl_tags = urllib.request.urlopen(request)
 tags = json.loads(openssl_tags.read().decode('utf-8'))
@@ -66,7 +69,11 @@ matrix = []
 
 # Ubuntu
 
-os = "ubuntu-latest" if "haproxy-" not in ref_name else "ubuntu-22.04"
+if "haproxy-" in ref_name:
+os = "ubuntu-22.04"
+else:
+os = "ubuntu-latest"
+
 TARGET = "linux-glibc"
 for CC in ["gcc", "clang"]:
 matrix.append(
@@ -153,13 +160,21 @@ for CC in ["gcc", "clang"]:
 }
 )
 
-for ssl in [
+ssl_versions = [
 "stock",
 "OPENSSL_VERSION=1.0.2u",
 "OPENSSL_VERSION=1.1.1s",
 "QUICTLS=yes",
 #"BORINGSSL=yes",
-] + (["OPENSSL_VERSION=latest", "LIBRESSL_VERSION=latest"] if "haproxy-" 
not in ref_name else []):
+]
+
+if "haproxy-" not in ref_name:
+ssl_versions = ssl_versions + [
+"OPENSSL_VERSION=latest",
+"LIBRESSL_VERSION=latest",
+]
+
+for ssl in ssl_versions:
 flags = ["USE_OPENSSL=1"]
 if ssl == "BORINGSSL=yes" or ssl == "QUICTLS=yes" or "LIBRESSL" in ssl:
 flags.append("USE_QUIC=1")
@@ -184,7 +199,11 @@ for CC in ["gcc", "clang"]:
 
 # macOS
 
-os = "macos-latest" if "haproxy-" not in ref_name else "macos-12"
+if "haproxy-" in ref_name:
+os = "macos-12"
+else:
+os = "macos-latest"
+
 TARGET = "osx"
 for CC in ["clang"]:
 matrix.append(
-- 
2.39.0




[PATCH] CI: Add `schedule` to vtest.yml

2022-12-06 Thread Tim Duesterhus
William,

On 12/6/22 19:40, William Lallemand wrote:
> I disagree, porting to a new API is not something you would do just
> before a release, you need to do it progressively if possible, because
> it could introduce heavy development and sometimes discussions with the
> library developers and unfortunately that could take time.

I understand now. I thought this was primarily about bumping new patch
versions, not about testing new feature branches.

Please find a patch attached that will start to run the vtest.yml workflow
weekly early on Thursday morning, making the results available at the start
of the Thursday workday.

If that change is working correctly, we should see that the "Generate Build
Matrix" step outputs "Generating matrix for type 'schedule'." for those
scheduled jobs. You can then add whatever extra jobs you desire to matrix.py
by checking for `build_type == "schedule"`, like this:

ssl_versions = [
"stock",
"OPENSSL_VERSION=1.0.2u",
"OPENSSL_VERSION=3.0.0",
]

if build_type == "schedule":
ssl_versions.append("OPENSSL_VERSION=latest")

Best regards
Tim Düsterhus

Apply with `git am --scissors` to automatically cut the commit message.

-- >8 --
This will run the vtest.yml workflow weekly on every Thursday morning in
addition to running on every push.

As the `github.event_name` is passed as a parameter to `matrix.py`, this allows
to run specific jobs (e.g. heavy jobs or unstable ones) only on schedule if the
need arises.
---
 .github/workflows/vtest.yml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.github/workflows/vtest.yml b/.github/workflows/vtest.yml
index fb7b1d968a..65892cf6b8 100644
--- a/.github/workflows/vtest.yml
+++ b/.github/workflows/vtest.yml
@@ -10,6 +10,8 @@ name: VTest
 
 on:
   push:
+  schedule:
+  - cron: "37 5 * * 4" # 05:37 UTC on Thursday
 
 permissions:
   contents: read
-- 
2.38.1




[PATCH 2/2] CI: Replace the deprecated `::set-output` command by writing to $GITHUB_OUTPUT in workflow definition

2022-10-14 Thread Tim Duesterhus
See "CI: Replace the deprecated `::set-output` command by writing to
$GITHUB_OUTPUT in matrix.py" for the reasoning behind this commit.
---
 .github/workflows/compliance.yml | 4 ++--
 .github/workflows/vtest.yml  | 4 ++--
 .github/workflows/windows.yml| 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/compliance.yml b/.github/workflows/compliance.yml
index 9d14ff57d5..509eaf8411 100644
--- a/.github/workflows/compliance.yml
+++ b/.github/workflows/compliance.yml
@@ -27,7 +27,7 @@ jobs:
 curl -fsSL 
https://github.com/summerwind/h2spec/releases/download/${H2SPEC_VERSION}/h2spec_linux_amd64.tar.gz
 -o h2spec.tar.gz
 tar xvf h2spec.tar.gz
 sudo install -m755 h2spec /usr/local/bin/h2spec
-echo "::set-output name=version::${H2SPEC_VERSION}"
+echo "version=${H2SPEC_VERSION}" >> $GITHUB_OUTPUT
 - name: Compile HAProxy with ${{ matrix.CC }}
   run: |
 make -j$(nproc) all \
@@ -50,7 +50,7 @@ jobs:
 fi
 echo "::endgroup::"
 haproxy -vv
-echo "::set-output name=version::$(haproxy -v |awk 'NR==1{print $3}')"
+echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
 - name: Launch HAProxy ${{ steps.show-version.outputs.version }}
   run: haproxy -f .github/h2spec.config -D
 - name: Run h2spec ${{ steps.install-h2spec.outputs.version }}
diff --git a/.github/workflows/vtest.yml b/.github/workflows/vtest.yml
index 7a1c1ef62a..75c79c25af 100644
--- a/.github/workflows/vtest.yml
+++ b/.github/workflows/vtest.yml
@@ -53,7 +53,7 @@ jobs:
 - name: Generate cache key
   id: generate-cache-key
   run: |
-echo "::set-output name=key::$(echo ${{ matrix.name }} | sha256sum | 
awk '{print $1}')"
+echo "key=$(echo ${{ matrix.name }} | sha256sum | awk '{print $1}')" 
>> $GITHUB_OUTPUT
 
 - name: Cache SSL libs
   if: ${{ matrix.ssl && matrix.ssl != 'stock' && matrix.ssl != 
'BORINGSSL=yes' && matrix.ssl != 'QUICTLS=yes' }}
@@ -124,7 +124,7 @@ jobs:
 fi
 echo "::endgroup::"
 haproxy -vv
-echo "::set-output name=version::$(haproxy -v |awk 'NR==1{print $3}')"
+echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
 - name: Install problem matcher for VTest
   # This allows one to more easily see which tests fail.
   run: echo "::add-matcher::.github/vtest.json"
diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
index 3d034617fe..3030908623 100644
--- a/.github/workflows/windows.yml
+++ b/.github/workflows/windows.yml
@@ -64,4 +64,4 @@ jobs:
   id: show-version
   run: |
 ./haproxy -vv
-echo "::set-output name=version::$(./haproxy -v |awk 'NR==1{print 
$3}')"
+echo "version=$(./haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
-- 
2.38.0




[PATCH 1/2] CI: Replace the deprecated `::set-output` command by writing to $GITHUB_OUTPUT in matrix.py

2022-10-14 Thread Tim Duesterhus
As announced in

https://github.blog/changelog/2022-10-11-github-actions-deprecating-save-state-and-set-output-commands/

the `::set-output` command is deprecated, because processes during the workflow
execution might output untrusted information that might include the
`::set-output` command, thus allowing this untrusted information to hijack the
build.

The replacement is writing to the file indicated by the `$GITHUB_OUTPUT`
environment variable.
---
 .github/matrix.py | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/.github/matrix.py b/.github/matrix.py
index 19ae87d79c..76e0893a7b 100755
--- a/.github/matrix.py
+++ b/.github/matrix.py
@@ -12,6 +12,7 @@ import json
 import sys
 import urllib.request
 import re
+from os import environ
 
 if len(sys.argv) == 2:
 build_type = sys.argv[1]
@@ -208,4 +209,6 @@ for CC in ["clang"]:
 
 print(json.dumps(matrix, indent=4, sort_keys=True))
 
-print("::set-output name=matrix::{}".format(json.dumps({"include": matrix})))
+if environ.get('GITHUB_OUTPUT') is not None:
+with open(environ.get('GITHUB_OUTPUT'), 'a') as f:
+print("matrix={}".format(json.dumps({"include": matrix})), file=f)
-- 
2.38.0




[PATCH 2/2] CLEANUP: Reapply strcmp.cocci

2022-10-08 Thread Tim Duesterhus
This reapplies strcmp.cocci across the whole src/ tree.
---
 src/cfgparse-quic.c | 4 ++--
 src/flt_bwlim.c | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/cfgparse-quic.c b/src/cfgparse-quic.c
index 5268e9adaf..f6706f2e0d 100644
--- a/src/cfgparse-quic.c
+++ b/src/cfgparse-quic.c
@@ -26,9 +26,9 @@ static int bind_parse_quic_cc_algo(char **args, int cur_arg, 
struct proxy *px,
return ERR_ALERT | ERR_FATAL;
}
 
-   if (!strcmp(args[cur_arg + 1], "newreno"))
+   if (strcmp(args[cur_arg + 1], "newreno") == 0)
cc_algo = _cc_algo_nr;
-   else if (!strcmp(args[cur_arg + 1], "cubic"))
+   else if (strcmp(args[cur_arg + 1], "cubic") == 0)
cc_algo = _cc_algo_cubic;
else {
memprintf(err, "'%s' : unknown control congestion algorithm", 
args[cur_arg]);
diff --git a/src/flt_bwlim.c b/src/flt_bwlim.c
index e3d3c0dc09..203f91d370 100644
--- a/src/flt_bwlim.c
+++ b/src/flt_bwlim.c
@@ -419,7 +419,7 @@ int check_bwlim_action(struct act_rule *rule, struct proxy 
*px, char **err)
conf = NULL;
if (fconf->id == bwlim_flt_id) {
conf = fconf->conf;
-   if (!strcmp(rule->arg.act.p[0], conf->name))
+   if (strcmp(rule->arg.act.p[0], conf->name) == 0)
break;
}
}
@@ -639,7 +639,7 @@ static int parse_bwlim_flt(char **args, int *cur_arg, 
struct proxy *px, struct f
if (fc->id == bwlim_flt_id) {
struct bwlim_config *c = fc->conf;
 
-   if (!strcmp(conf->name, c->name)) {
+   if (strcmp(conf->name, c->name) == 0) {
memprintf(err, "bwlim filter '%s' already 
declared for proxy '%s'\n",
  conf->name, px->id);
goto error;
-- 
2.38.0




[PATCH 0/2] Reapply Coccinelle patches

2022-10-08 Thread Tim Duesterhus
Willy,

as with the past branches, I've reapplied the Coccinelle patches to do
some cleanup before the release.

Best regards

Tim Duesterhus (2):
  CLEANUP: Reapply ist.cocci (2)
  CLEANUP: Reapply strcmp.cocci

 src/cfgparse-quic.c | 4 ++--
 src/flt_bwlim.c | 4 ++--
 src/hlua.c  | 3 +--
 3 files changed, 5 insertions(+), 6 deletions(-)

-- 
2.38.0




[PATCH 1/2] CLEANUP: Reapply ist.cocci (2)

2022-10-08 Thread Tim Duesterhus
This reapplies ist.cocci across the whole src/ tree.
---
 src/hlua.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/hlua.c b/src/hlua.c
index 19932a5b12..1595368796 100644
--- a/src/hlua.c
+++ b/src/hlua.c
@@ -6511,8 +6511,7 @@ static void _hlua_http_msg_delete(struct http_msg *msg, 
struct filter *filter, s
goto end;
 
v = htx_get_blk_value(htx, blk);
-   v.ptr += htxret.ret;
-   v.len -= htxret.ret;
+   v = istadv(v, htxret.ret);
 
v = isttrim(v, len);
/* trimming data in blk: discard everything after the offset
-- 
2.38.0




[PATCH] CLEANUP: Re-apply xalloc_size.cocci (2)

2022-06-01 Thread Tim Duesterhus
This reapplies the xalloc_size.cocci patch across the whole `src/` tree.

see 16cc16dd8235e7eb6c38b7abd210bd1e1d96b1d9
see 63ee0e4c01b94aee5fc6c6dd98cfc4480ae5ea46
---
 src/ncbuf.c  | 2 +-
 src/proto_quic.c | 2 +-
 src/quic_sock.c  | 3 ++-
 3 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/src/ncbuf.c b/src/ncbuf.c
index 1944cfe34..adb32b57a 100644
--- a/src/ncbuf.c
+++ b/src/ncbuf.c
@@ -726,7 +726,7 @@ struct rand_off {
 static struct rand_off *ncb_generate_rand_off(const struct ncbuf *buf)
 {
struct rand_off *roff;
-   roff = calloc(1, sizeof(struct rand_off));
+   roff = calloc(1, sizeof(*roff));
BUG_ON(!roff);
 
roff->off = rand() % (ncb_size(buf));
diff --git a/src/proto_quic.c b/src/proto_quic.c
index 55aa4b50f..ab1bef18f 100644
--- a/src/proto_quic.c
+++ b/src/proto_quic.c
@@ -703,7 +703,7 @@ static int quic_alloc_dghdlrs(void)
 {
int i;
 
-   quic_dghdlrs = calloc(global.nbthread, sizeof(struct quic_dghdlr));
+   quic_dghdlrs = calloc(global.nbthread, sizeof(*quic_dghdlrs));
if (!quic_dghdlrs) {
ha_alert("Failed to allocate the quic datagram handlers.\n");
return 0;
diff --git a/src/quic_sock.c b/src/quic_sock.c
index 6207af703..a391006af 100644
--- a/src/quic_sock.c
+++ b/src/quic_sock.c
@@ -466,7 +466,8 @@ static int quic_alloc_accept_queues(void)
 {
int i;
 
-   quic_accept_queues = calloc(global.nbthread, sizeof(struct 
quic_accept_queue));
+   quic_accept_queues = calloc(global.nbthread,
+   sizeof(*quic_accept_queues));
if (!quic_accept_queues) {
ha_alert("Failed to allocate the quic accept queues.\n");
return 0;
-- 
2.36.1




[haproxy/docs PATCH] Replace `primary` with `info` for HAProxy 2.5 on index.html

2022-05-31 Thread Tim Duesterhus
2.5 is neither the newest stable version, nor the newest LTS version, thus
there is no reason for it to be highlighted.
---
 docs/index.html | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/index.html b/docs/index.html
index 4aa77c9..403fce9 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -45,7 +45,7 @@



-   
+   


HAProxy 2.5

-- 
2.36.1




[PATCH] DOC: Fix formatting in configuration.txt to fix dconv

2022-05-27 Thread Tim Duesterhus
The missing space before the colon causes haproxy-dconv to misparse the
configuration.txt.
---
 doc/configuration.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/configuration.txt b/doc/configuration.txt
index 3a5728539..6343f9f13 100644
--- a/doc/configuration.txt
+++ b/doc/configuration.txt
@@ -18126,7 +18126,7 @@ ipv4() : ipv4
 ipv6() : ipv6
   Returns an ipv6.
 
-last_rule_file: string
+last_rule_file : string
   This returns the name of the configuration file containing the last final
   rule that was matched during stream analysis. A final rule is one that
   terminates the evaluation of the rule set (like an "accept", "deny" or
@@ -18140,7 +18140,7 @@ last_rule_file: string
   logs where was the rule that gave the final verdict, in order to help
   figure why a request was denied for example. See also "last_rule_line".
 
-last_rule_line: integer
+last_rule_line : integer
   This returns the line number in the configuration file where is located the
   last final rule that was matched during stream analysis. A final rule is one
   that terminates the evaluation of the rule set (like an "accept", "deny" or
-- 
2.36.1




[PATCH] REGTESTS: Do not use REQUIRE_VERSION for HAProxy 2.5+ (2)

2022-05-23 Thread Tim Duesterhus
Introduced in:

18c13d3bd MEDIUM: http-ana: Add a proxy option to restrict chars in request 
header names

see also:

fbbbc33df REGTESTS: Do not use REQUIRE_VERSION for HAProxy 2.5+
---
 reg-tests/http-rules/restrict_req_hdr_names.vtc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/reg-tests/http-rules/restrict_req_hdr_names.vtc 
b/reg-tests/http-rules/restrict_req_hdr_names.vtc
index 28a10d3db..a3a95939f 100644
--- a/reg-tests/http-rules/restrict_req_hdr_names.vtc
+++ b/reg-tests/http-rules/restrict_req_hdr_names.vtc
@@ -1,5 +1,5 @@
 varnishtest "http-restrict-req-hdr-names option tests"
-#REQUIRE_VERSION=2.6
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.6-dev0)'"
 
 # This config tests "http-restrict-req-hdr-names" option
 
-- 
2.36.1




[PATCH v2] CLEANUP: tools: Crash if inet_ntop fails due to ENOSPC in sa2str

2022-05-23 Thread Tim Duesterhus
This is impossible, because we pass a destination buffer that is appropriately
sized to hold an IPv6 address.

This is related to GitHub issue #1599.
---
 src/tools.c | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/tools.c b/src/tools.c
index 79d1d5c9b..4ecbdc4d7 100644
--- a/src/tools.c
+++ b/src/tools.c
@@ -1375,7 +1375,10 @@ char * sa2str(const struct sockaddr_storage *addr, int 
port, int map_ports)
default:
return NULL;
}
-   inet_ntop(addr->ss_family, ptr, buffer, sizeof(buffer));
+   if (inet_ntop(addr->ss_family, ptr, buffer, sizeof(buffer)) == NULL) {
+   BUG_ON(errno == ENOSPC);
+   return NULL;
+   }
if (map_ports)
return memprintf(, "%s:%+d", buffer, port);
else
-- 
2.36.1




[PATCH 1/2] BUG/MEDIUM: tools: Fix `inet_ntop` usage in sa2str

2022-05-22 Thread Tim Duesterhus
The given size must be the size of the destination buffer, not the size of the
(binary) address representation.

This fixes GitHub issue #1599.

The bug was introduced in 92149f9a82a9b55c598f1cc815bc330c555f3561 which is in
2.4+. The fix must be backported there.
---
 src/tools.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/tools.c b/src/tools.c
index 9e629e5bd..b9a1121c6 100644
--- a/src/tools.c
+++ b/src/tools.c
@@ -1374,7 +1374,7 @@ char * sa2str(const struct sockaddr_storage *addr, int 
port, int map_ports)
default:
return NULL;
}
-   inet_ntop(addr->ss_family, ptr, buffer, get_addr_len(addr));
+   inet_ntop(addr->ss_family, ptr, buffer, sizeof(buffer));
if (map_ports)
return memprintf(, "%s:%+d", buffer, port);
else
-- 
2.36.1




[PATCH 2/2] CLEANUP: tools: Crash if inet_ntop fails in sa2str

2022-05-22 Thread Tim Duesterhus
This is impossible, because we pass a destination buffer that is appropriately
sized to hold an IPv6 address.

This is related to GitHub issue #1599.
---
 src/tools.c | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/tools.c b/src/tools.c
index b9a1121c6..ce82fea4a 100644
--- a/src/tools.c
+++ b/src/tools.c
@@ -1374,7 +1374,10 @@ char * sa2str(const struct sockaddr_storage *addr, int 
port, int map_ports)
default:
return NULL;
}
-   inet_ntop(addr->ss_family, ptr, buffer, sizeof(buffer));
+   if (inet_ntop(addr->ss_family, ptr, buffer, sizeof(buffer)) == NULL) {
+   BUG_ON("inet_ntop failed to convert");
+   return NULL;
+   }
if (map_ports)
return memprintf(, "%s:%+d", buffer, port);
else
-- 
2.36.1




[PATCH] CLEANUP: tools: Clean up non-QUIC error message handling in str2sa_range()

2022-05-22 Thread Tim Duesterhus
If QUIC support is enabled both branches of the ternary conditional are
identical, upsetting Coverity. Move the full conditional into the non-QUIC
preprocessor branch to make the code more clear.

This resolves GitHub issue #1710.
---
 src/tools.c | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/src/tools.c b/src/tools.c
index 9e629e5bd..4c93e1e82 100644
--- a/src/tools.c
+++ b/src/tools.c
@@ -1305,13 +1305,14 @@ struct sockaddr_storage *str2sa_range(const char *str, 
int *port, int *low, int
  (proto_type == PROTO_TYPE_DGRAM) ? "datagram" 
: "stream",
  ss.ss_family,
  str,
- (ctrl_type == SOCK_STREAM && proto_type == 
PROTO_TYPE_DGRAM) ?
 #ifndef USE_QUIC
- "; QUIC is not compiled in if this is what 
you were looking for."
+ (ctrl_type == SOCK_STREAM && proto_type == 
PROTO_TYPE_DGRAM)
+ ? "; QUIC is not compiled in if this is what 
you were looking for."
+ : ""
 #else
  ""
 #endif
- :"");
+   );
goto out;
}
 
-- 
2.36.1




[PATCH] CLEANUP: http_ana: Make use of the return value of stream_generate_unique_id()

2022-05-17 Thread Tim Duesterhus
Even if `unique_id` and `s->unique_id` are identical it is a bit odd to
`isttest()` `unique_id` and then use `s->unique_id` in the call to 
`http_add_header()`.

This "issue" was introduced in a17e66289c08a5bfadc1bb5b5f2c618c9299fe1b,
because before that commit the function returned the length of the ID, as it
was not an ist.
---
 src/http_ana.c | 2 +-
 src/stream.c   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/http_ana.c b/src/http_ana.c
index 4b3113e2d..69340656a 100644
--- a/src/http_ana.c
+++ b/src/http_ana.c
@@ -646,7 +646,7 @@ int http_process_request(struct stream *s, struct channel 
*req, int an_bit)
 
/* send unique ID if a "unique-id-header" is defined */
if (isttest(sess->fe->header_unique_id) &&
-   unlikely(!http_add_header(htx, sess->fe->header_unique_id, 
s->unique_id)))
+   unlikely(!http_add_header(htx, sess->fe->header_unique_id, 
unique_id)))
goto return_int_err;
}
 
diff --git a/src/stream.c b/src/stream.c
index c7002366f..a02ea9871 100644
--- a/src/stream.c
+++ b/src/stream.c
@@ -2861,7 +2861,7 @@ INITCALL0(STG_INIT, init_stream);
 
 /* Generates a unique ID based on the given , stores it in the given 
 and
  * returns the unique ID.
-
+ *
  * If this function fails to allocate memory IST_NULL is returned.
  *
  * If an ID is already stored within the stream nothing happens existing 
unique ID is
-- 
2.36.1




[PATCH v2 2/3] CLEANUP: Add missing header to hlua_fcn.c

2022-05-14 Thread Tim Duesterhus
Found with -Wmissing-prototypes:

src/hlua_fcn.c:53:5: fatal error: no previous prototype for function 
'hlua_checkboolean' [-Wmissing-prototypes]
int hlua_checkboolean(lua_State *L, int index)
^
src/hlua_fcn.c:53:1: note: declare 'static' if the function is not intended 
to be used outside of this translation unit
int hlua_checkboolean(lua_State *L, int index)
^
static
1 error generated.
---
 src/hlua_fcn.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/hlua_fcn.c b/src/hlua_fcn.c
index 4c16d90a8..5907d4855 100644
--- a/src/hlua_fcn.c
+++ b/src/hlua_fcn.c
@@ -26,6 +26,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
-- 
2.36.1




[PATCH 3/3] CLEANUP: Remove unused function hlua_get_top_error_string

2022-05-14 Thread Tim Duesterhus
This function has no prototype defined in a header and is not used in hlua.c
either, thus it can be safely removed. Found with -Wmissing-prototypes.
---
 src/hlua.c | 10 --
 1 file changed, 10 deletions(-)

diff --git a/src/hlua.c b/src/hlua.c
index ef967a515..abe3556b6 100644
--- a/src/hlua.c
+++ b/src/hlua.c
@@ -498,16 +498,6 @@ __LJMP unsigned int hlua_checktable(lua_State *L, int 
argno)
return luaL_ref(L, LUA_REGISTRYINDEX);
 }
 
-/* Return the string that is of the top of the stack. */
-const char *hlua_get_top_error_string(lua_State *L)
-{
-   if (lua_gettop(L) < 1)
-   return "unknown error";
-   if (lua_type(L, -1) != LUA_TSTRING)
-   return "unknown error";
-   return lua_tostring(L, -1);
-}
-
 __LJMP const char *hlua_traceback(lua_State *L, const char* sep)
 {
lua_Debug ar;
-- 
2.36.1




[PATCH 1/3] CLEANUP: Add missing header to ssl_utils.c

2022-05-14 Thread Tim Duesterhus
Found with -Wmissing-prototypes:

src/ssl_utils.c:22:5: fatal error: no previous prototype for function 
'cert_get_pkey_algo' [-Wmissing-prototypes]
int cert_get_pkey_algo(X509 *crt, struct buffer *out)
^
src/ssl_utils.c:22:1: note: declare 'static' if the function is not 
intended to be used outside of this translation unit
int cert_get_pkey_algo(X509 *crt, struct buffer *out)
^
static
1 error generated.
---
 src/ssl_utils.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/ssl_utils.c b/src/ssl_utils.c
index 35c06f73d..0f4a8596c 100644
--- a/src/ssl_utils.c
+++ b/src/ssl_utils.c
@@ -17,6 +17,7 @@
 #include 
 #include 
 #include 
+#include 
 
 /* fill a buffer with the algorithm and size of a public key */
 int cert_get_pkey_algo(X509 *crt, struct buffer *out)
-- 
2.36.1




[PATCH 2/3] CLEANUP: Add missing header to ssl_utils.c

2022-05-14 Thread Tim Duesterhus
Found with -Wmissing-prototypes:

src/hlua_fcn.c:53:5: fatal error: no previous prototype for function 
'hlua_checkboolean' [-Wmissing-prototypes]
int hlua_checkboolean(lua_State *L, int index)
^
src/hlua_fcn.c:53:1: note: declare 'static' if the function is not intended 
to be used outside of this translation unit
int hlua_checkboolean(lua_State *L, int index)
^
static
1 error generated.
---
 src/hlua_fcn.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/hlua_fcn.c b/src/hlua_fcn.c
index 4c16d90a8..5907d4855 100644
--- a/src/hlua_fcn.c
+++ b/src/hlua_fcn.c
@@ -26,6 +26,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
-- 
2.36.1




[PATCH] MINOR: Call deinit_and_exit(0) for `haproxy -vv`

2022-04-26 Thread Tim Duesterhus
It appears that it is safe to perform a clean deinit at this point, so
let's do this to exercise the deinit paths some more.

Running `valgrind --leak-check=full --show-leak-kinds=all ./haproxy -vv` with
this change reports:

==261864== HEAP SUMMARY:
==261864== in use at exit: 344 bytes in 11 blocks
==261864==   total heap usage: 1,178 allocs, 1,167 frees, 1,102,089 bytes 
allocated
==261864==
==261864== 24 bytes in 1 blocks are still reachable in loss record 1 of 2
==261864==at 0x483DD99: calloc (in 
/usr/lib/x86_64-linux-gnu/valgrind/vgpreload_memcheck-amd64-linux.so)
==261864==by 0x324BA6: hap_register_pre_check (init.c:92)
==261864==by 0x155824: main (haproxy.c:3024)
==261864==
==261864== 320 bytes in 10 blocks are still reachable in loss record 2 of 2
==261864==at 0x483DD99: calloc (in 
/usr/lib/x86_64-linux-gnu/valgrind/vgpreload_memcheck-amd64-linux.so)
==261864==by 0x26E54E: cfg_register_postparser (cfgparse.c:4238)
==261864==by 0x155824: main (haproxy.c:3024)
==261864==
==261864== LEAK SUMMARY:
==261864==definitely lost: 0 bytes in 0 blocks
==261864==indirectly lost: 0 bytes in 0 blocks
==261864==  possibly lost: 0 bytes in 0 blocks
==261864==still reachable: 344 bytes in 11 blocks
==261864== suppressed: 0 bytes in 0 blocks

which is looking pretty good.
---
 src/haproxy.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/haproxy.c b/src/haproxy.c
index 6fbe85bd3..b43997b6c 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -1608,7 +1608,7 @@ static void init_args(int argc, char **argv)
display_version();
if (flag[1] == 'v')  /* -vv */
display_build_opts();
-   exit(0);
+   deinit_and_exit(0);
}
 #if defined(USE_EPOLL)
else if (*flag == 'd' && flag[1] == 'e')
-- 
2.36.0




[PATCH] CLEANUP: Destroy `http_err_chunks` members during deinit

2022-04-26 Thread Tim Duesterhus
To make the deinit function a proper inverse of the init function we need to
free the `http_err_chunks`:

==252081== 311,296 bytes in 19 blocks are still reachable in loss record 50 
of 50
==252081==at 0x483B7F3: malloc (in 
/usr/lib/x86_64-linux-gnu/valgrind/vgpreload_memcheck-amd64-linux.so)
==252081==by 0x2727EE: http_str_to_htx (http_htx.c:914)
==252081==by 0x272E60: http_htx_init (http_htx.c:1059)
==252081==by 0x26AC87: check_config_validity (cfgparse.c:4170)
==252081==by 0x155DFE: init (haproxy.c:2120)
==252081==by 0x155DFE: main (haproxy.c:3037)
---
 src/http_htx.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/http_htx.c b/src/http_htx.c
index d9584abae..ea4c25f1a 100644
--- a/src/http_htx.c
+++ b/src/http_htx.c
@@ -1112,6 +1112,9 @@ static void http_htx_deinit(void)
LIST_DELETE(_rep->list);
release_http_reply(http_rep);
}
+
+   for (rc = 0; rc < HTTP_ERR_SIZE; rc++)
+   chunk_destroy(_err_chunks[rc]);
 }
 
 REGISTER_CONFIG_POSTPARSER("http_htx", http_htx_init);
-- 
2.36.0




[PATCH] BUG/MINOR: Fix memory leak in resolvers_deinit()

2022-04-26 Thread Tim Duesterhus
A config like the following:

global
stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd 
listeners

resolvers unbound
nameserver unbound 127.0.0.1:53

will report the following leak when running a configuration check:

==241882== 6,991 (6,952 direct, 39 indirect) bytes in 1 blocks are 
definitely lost in loss record 8 of 13
==241882==at 0x483DD99: calloc (in 
/usr/lib/x86_64-linux-gnu/valgrind/vgpreload_memcheck-amd64-linux.so)
==241882==by 0x25938D: cfg_parse_resolvers (resolvers.c:3193)
==241882==by 0x26A1E8: readcfgfile (cfgparse.c:2171)
==241882==by 0x156D72: init (haproxy.c:2016)
==241882==by 0x156D72: main (haproxy.c:3037)

because the `.px` member of `struct resolvers` is not freed.

The offending allocation was introduced in
c943799c865c04281454a7a54fd6c45c2b4d7e09 which is a reorganization that
happened during development of 2.4.x. This fix can likely be backported without
issue to 2.4+ and is likely not needed for earlier versions as the leak happens
during deinit only.
---
 src/resolvers.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/resolvers.c b/src/resolvers.c
index 0b7faf93d..3179073b5 100644
--- a/src/resolvers.c
+++ b/src/resolvers.c
@@ -2448,6 +2448,7 @@ static void resolvers_deinit(void)
abort_resolution(res);
}
 
+   free_proxy(resolvers->px);
free(resolvers->id);
free((char *)resolvers->conf.file);
task_destroy(resolvers->t);
-- 
2.36.0




[PATCH 1/2] CI: Update to actions/checkout@v3

2022-04-09 Thread Tim Duesterhus
No functional change, but we should keep this current.
---
 .github/workflows/codespell.yml| 2 +-
 .github/workflows/compliance.yml   | 2 +-
 .github/workflows/contrib.yml  | 2 +-
 .github/workflows/coverity.yml | 2 +-
 .github/workflows/musl.yml | 2 +-
 .github/workflows/openssl-nodeprecated.yml | 2 +-
 .github/workflows/vtest.yml| 4 ++--
 .github/workflows/windows.yml  | 2 +-
 8 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
index 3b3114135..2243d8b37 100644
--- a/.github/workflows/codespell.yml
+++ b/.github/workflows/codespell.yml
@@ -11,7 +11,7 @@ jobs:
   codespell:
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@v2
+- uses: actions/checkout@v3
 - uses: codespell-project/codespell-problem-matcher@v1
 - uses: codespell-project/actions-codespell@master
   with:
diff --git a/.github/workflows/compliance.yml b/.github/workflows/compliance.yml
index 148ea2866..1105abbf2 100644
--- a/.github/workflows/compliance.yml
+++ b/.github/workflows/compliance.yml
@@ -21,7 +21,7 @@ jobs:
 env:
   H2SPEC_VERSION: '2.6.0'
 steps:
-- uses: actions/checkout@v2
+- uses: actions/checkout@v3
 - name: Install h2spec
   run: |
 curl -fsSL 
https://github.com/summerwind/h2spec/releases/download/v${H2SPEC_VERSION}/h2spec_linux_amd64.tar.gz
 -o h2spec.tar.gz
diff --git a/.github/workflows/contrib.yml b/.github/workflows/contrib.yml
index 480f61be8..99a1576d8 100644
--- a/.github/workflows/contrib.yml
+++ b/.github/workflows/contrib.yml
@@ -10,7 +10,7 @@ jobs:
   build:
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@v2
+- uses: actions/checkout@v3
 - name: Compile admin/halog/halog
   run: |
 make admin/halog/halog
diff --git a/.github/workflows/coverity.yml b/.github/workflows/coverity.yml
index 0c4b2d0ed..e208c8cac 100644
--- a/.github/workflows/coverity.yml
+++ b/.github/workflows/coverity.yml
@@ -24,7 +24,7 @@ jobs:
   # parameters at whitespaces, without taking quoting into account.
   COVERITY_SCAN_BUILD_COMMAND: "make CC=clang TARGET=linux-glibc 
USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 USE_OPENSSL=1 USE_QUIC=1 
USE_SYSTEMD=1 USE_WURFL=1 WURFL_INC=addons/wurfl/dummy 
WURFL_LIB=addons/wurfl/dummy USE_DEVICEATLAS=1 
DEVICEATLAS_SRC=addons/deviceatlas/dummy USE_51DEGREES=1 
51DEGREES_SRC=addons/51degrees/dummy/pattern 
ADDLIB=\"-Wl,-rpath,$HOME/opt/lib/\" SSL_LIB=${HOME}/opt/lib 
SSL_INC=${HOME}/opt/include DEBUG+=-DDEBUG_STRICT=1 DEBUG+=-DDEBUG_USE_ABORT=1"
 steps:
-- uses: actions/checkout@v2
+- uses: actions/checkout@v3
 - name: Install apt dependencies
   run: |
 sudo apt-get update
diff --git a/.github/workflows/musl.yml b/.github/workflows/musl.yml
index aba3f4e14..5a6b46a7b 100644
--- a/.github/workflows/musl.yml
+++ b/.github/workflows/musl.yml
@@ -13,7 +13,7 @@ jobs:
   container:
 image: alpine:latest
   steps:
-  - uses: actions/checkout@v2
+  - uses: actions/checkout@v3
   - name: Install dependencies
 run: apk add gcc make tar git python3 libc-dev linux-headers pcre-dev 
pcre2-dev openssl-dev lua5.3-dev grep socat curl
   - name: Install VTest
diff --git a/.github/workflows/openssl-nodeprecated.yml 
b/.github/workflows/openssl-nodeprecated.yml
index e423f58dd..e7f7ffaa5 100644
--- a/.github/workflows/openssl-nodeprecated.yml
+++ b/.github/workflows/openssl-nodeprecated.yml
@@ -21,7 +21,7 @@ jobs:
   test:
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@v2
+- uses: actions/checkout@v3
 - name: Install VTest
   run: |
 scripts/build-vtest.sh
diff --git a/.github/workflows/vtest.yml b/.github/workflows/vtest.yml
index a9e86b6a2..e413b20f5 100644
--- a/.github/workflows/vtest.yml
+++ b/.github/workflows/vtest.yml
@@ -23,7 +23,7 @@ jobs:
 outputs:
   matrix: ${{ steps.set-matrix.outputs.matrix }}
 steps:
-  - uses: actions/checkout@v2
+  - uses: actions/checkout@v3
   - name: Generate Build Matrix
 id: set-matrix
 run: python3 .github/matrix.py "${{ github.event_name }}"
@@ -44,7 +44,7 @@ jobs:
   ASAN_OPTIONS: log_path=asan.log
   OT_CPP_VERSION: 1.6.0
 steps:
-- uses: actions/checkout@v2
+- uses: actions/checkout@v3
   with:
 fetch-depth: 100
 #
diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
index de9a00fd6..3d034617f 100644
--- a/.github/workflows/windows.yml
+++ b/.github/workflows/windows.yml
@@ -35,7 +35,7 @@ jobs:
   - USE_THREAD=1
   - USE_ZLIB=1
 steps:
-- uses: actions/checkout@v2
+- uses: actions/checkout@v3
 - uses: msys2/setup-msys2@v2
   with:
 install: >-
-- 
2.35.1




[PATCH 2/2] CI: Update to actions/cache@v3

2022-04-09 Thread Tim Duesterhus
No functional changes for our use case, but we should keep this current.
---
 .github/workflows/vtest.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/vtest.yml b/.github/workflows/vtest.yml
index e413b20f5..7a1c1ef62 100644
--- a/.github/workflows/vtest.yml
+++ b/.github/workflows/vtest.yml
@@ -58,7 +58,7 @@ jobs:
 - name: Cache SSL libs
   if: ${{ matrix.ssl && matrix.ssl != 'stock' && matrix.ssl != 
'BORINGSSL=yes' && matrix.ssl != 'QUICTLS=yes' }}
   id: cache_ssl
-  uses: actions/cache@v2
+  uses: actions/cache@v3
   with:
 path: '~/opt/'
 key: ssl-${{ steps.generate-cache-key.outputs.key }}
@@ -66,7 +66,7 @@ jobs:
 - name: Cache OpenTracing
   if: ${{ contains(matrix.FLAGS, 'USE_OT=1') }}
   id: cache_ot
-  uses: actions/cache@v2
+  uses: actions/cache@v3
   with:
 path: '~/opt-ot/'
 key: ot-${{ matrix.CC }}-${{ env.OT_CPP_VERSION }}-${{ 
contains(matrix.name, 'ASAN') }}
-- 
2.35.1




[PATCH 2/4] CLEANUP: Reapply ist.cocci with `--include-headers-for-types --recursive-includes`

2022-03-15 Thread Tim Duesterhus
Previous uses of `ist.cocci` did not add `--include-headers-for-types` and
`--recursive-includes` preventing Coccinelle seeing `struct ist` members of
other structs.

Reapply the patch with proper flags to further clean up the use of the ist API.

The command used was:

spatch -sp_file dev/coccinelle/ist.cocci -in_place --include-headers 
--include-headers-for-types --recursive-includes --dir src/
---
 src/cache.c |  2 +-
 src/fcgi.c  | 12 
 src/flt_http_comp.c | 12 ++--
 src/h1.c|  6 ++
 src/h2.c|  6 +++---
 src/http_fetch.c|  2 +-
 src/http_htx.c  | 10 --
 src/log.c   |  2 +-
 src/mux_fcgi.c  | 10 +-
 src/mux_h1.c|  2 +-
 src/sink.c  |  2 +-
 src/tcpcheck.c  |  2 +-
 12 files changed, 30 insertions(+), 38 deletions(-)

diff --git a/src/cache.c b/src/cache.c
index 60f9a0ed7..0f90af984 100644
--- a/src/cache.c
+++ b/src/cache.c
@@ -626,7 +626,7 @@ cache_store_http_payload(struct stream *s, struct filter 
*filter, struct http_ms
 
info = (type << 28) + v.len;
chunk_memcat(, (char *), 
sizeof(info));
-   chunk_memcat(, v.ptr, v.len);
+   chunk_istcat(, v);
to_forward += v.len;
len -= v.len;
break;
diff --git a/src/fcgi.c b/src/fcgi.c
index 1c2543def..dcf2db219 100644
--- a/src/fcgi.c
+++ b/src/fcgi.c
@@ -197,10 +197,8 @@ size_t fcgi_decode_param(const struct buffer *in, size_t 
o, struct fcgi_param *p
if (data < nlen + vlen)
return 0;
 
-   p->n.ptr = b_peek(in, o);
-   p->n.len = nlen;
-   p->v.ptr = b_peek(in, o+nlen);
-   p->v.len = vlen;
+   p->n = ist2(b_peek(in, o), nlen);
+   p->v = ist2(b_peek(in, o + nlen), vlen);
len += nlen + vlen;
 
return len;
@@ -254,10 +252,8 @@ size_t fcgi_aligned_decode_param(const struct buffer *in, 
size_t o, struct fcgi_
if (data < nlen + vlen)
return 0;
 
-   p->n.ptr = in->area + o;
-   p->n.len = nlen;
-   p->v.ptr = in->area + o + nlen;
-   p->v.len = vlen;
+   p->n = ist2(in->area + o, nlen);
+   p->v = ist2(in->area + o + nlen, vlen);
len += nlen + vlen;
 
return len;
diff --git a/src/flt_http_comp.c b/src/flt_http_comp.c
index f2b210a67..08f684e51 100644
--- a/src/flt_http_comp.c
+++ b/src/flt_http_comp.c
@@ -400,26 +400,26 @@ select_compression_request_header(struct comp_state *st, 
struct stream *s, struc
 
qval = ctx.value.ptr + toklen;
while (1) {
-   while (qval < ctx.value.ptr + ctx.value.len && 
HTTP_IS_LWS(*qval))
+   while (qval < istend(ctx.value) && 
HTTP_IS_LWS(*qval))
qval++;
 
-   if (qval >= ctx.value.ptr + ctx.value.len || 
*qval != ';') {
+   if (qval >= istend(ctx.value) || *qval != ';') {
qval = NULL;
break;
}
qval++;
 
-   while (qval < ctx.value.ptr + ctx.value.len && 
HTTP_IS_LWS(*qval))
+   while (qval < istend(ctx.value) && 
HTTP_IS_LWS(*qval))
qval++;
 
-   if (qval >= ctx.value.ptr + ctx.value.len) {
+   if (qval >= istend(ctx.value)) {
qval = NULL;
break;
}
-   if (strncmp(qval, "q=", MIN(ctx.value.ptr + 
ctx.value.len - qval, 2)) == 0)
+   if (strncmp(qval, "q=", MIN(istend(ctx.value) - 
qval, 2)) == 0)
break;
 
-   while (qval < ctx.value.ptr + ctx.value.len && 
*qval != ';')
+   while (qval < istend(ctx.value) && *qval != ';')
qval++;
}
 
diff --git a/src/h1.c b/src/h1.c
index dd208f323..bf546b9d7 100644
--- a/src/h1.c
+++ b/src/h1.c
@@ -428,8 +428,7 @@ int h1_headers_to_hdr_list(char *start, const char *stop,
http_msg_req09_uri_e:
sl.rq.u.len = ptr - sl.rq.u.ptr;
http_msg_req09_ver:
-   sl.rq.v.ptr = ptr;
-   sl.rq.v.len = 0;
+   sl.rq.v = ist2(ptr, 0);
goto http_msg_rqline_eol;
}
state = H1_MSG_RQMETH;
@@ -659,8 +658,7 @@ int h1_headers_to_hdr_list(char *start, const char *stop,

[PATCH 0/4] Using Coccinelle the right way

2022-03-15 Thread Tim Duesterhus
Willy,

I wanted to build a simple reproducer for the "ist in struct" issue to post
on the Coccinelle list and found that it worked if all structs are defined
in the same .c file. Searching the list archives then revealed the

  --include-headers-for-types

flag which fixes the issue we're seeing.

I've fixed a bug in the ist.cocci, reapplied it on the whole tree and then
turned the bugfix into another rule and applied that one.

Best regards

Tim Duesterhus (4):
  DEV: coccinelle: Fix incorrect replacement in ist.cocci
  CLEANUP: Reapply ist.cocci with `--include-headers-for-types
--recursive-includes`
  DEV: coccinelle: Add a new pattern to ist.cocci
  CLEANUP: Reapply ist.cocci

 dev/coccinelle/ist.cocci |  6 ++
 src/cache.c  |  2 +-
 src/fcgi.c   | 12 
 src/flt_http_comp.c  | 12 ++--
 src/h1.c |  6 ++
 src/h2.c |  6 +++---
 src/http_act.c   | 15 +--
 src/http_fetch.c |  2 +-
 src/http_htx.c   | 10 --
 src/log.c|  2 +-
 src/mux_fcgi.c   | 10 +-
 src/mux_h1.c |  2 +-
 src/sink.c   |  2 +-
 src/tcpcheck.c   |  2 +-
 14 files changed, 41 insertions(+), 48 deletions(-)

-- 
2.35.1




[PATCH 3/4] DEV: coccinelle: Add a new pattern to ist.cocci

2022-03-15 Thread Tim Duesterhus
This was previously ignored in "DEV: coccinelle: Fix incorrect replacement in 
ist.cocci",
but is now properly replaced by a simple `ist()` call.
---
 dev/coccinelle/ist.cocci | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/dev/coccinelle/ist.cocci b/dev/coccinelle/ist.cocci
index ea13d39d2..acde626b0 100644
--- a/dev/coccinelle/ist.cocci
+++ b/dev/coccinelle/ist.cocci
@@ -4,8 +4,9 @@ expression p, l;
 @@
 
 (
- i.ptr = p;
- i.len = strlen(i.ptr);
+- i.ptr = p;
+- i.len = strlen(i.ptr);
++ i = ist(p);
 |
 - i.ptr = p;
 - i.len = l;
-- 
2.35.1




[PATCH 4/4] CLEANUP: Reapply ist.cocci

2022-03-15 Thread Tim Duesterhus
This makes use of the newly added:

- i.ptr = p;
- i.len = strlen(i.ptr);
+ i = ist(p);

patch.
---
 src/http_act.c | 15 +--
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/src/http_act.c b/src/http_act.c
index b7ec31241..133a30c6a 100644
--- a/src/http_act.c
+++ b/src/http_act.c
@@ -697,8 +697,7 @@ static enum act_parse_ret parse_http_set_status(const char 
**args, int *orig_arg
if (*args[*orig_arg] && strcmp(args[*orig_arg], "reason") == 0 &&
(*args[*orig_arg + 1] && strcmp(args[*orig_arg + 1], "if") != 0 && 
strcmp(args[*orig_arg + 1], "unless") != 0)) {
(*orig_arg)++;
-   rule->arg.http.str.ptr = strdup(args[*orig_arg]);
-   rule->arg.http.str.len = strlen(rule->arg.http.str.ptr);
+   rule->arg.http.str = ist(strdup(args[*orig_arg]));
(*orig_arg)++;
}
 
@@ -1325,8 +1324,7 @@ static enum act_parse_ret parse_http_auth(const char 
**args, int *orig_arg, stru
memprintf(err, "missing realm value.\n");
return ACT_RET_PRS_ERR;
}
-   rule->arg.http.str.ptr = strdup(args[cur_arg]);
-   rule->arg.http.str.len = strlen(rule->arg.http.str.ptr);
+   rule->arg.http.str = ist(strdup(args[cur_arg]));
cur_arg++;
}
 
@@ -1508,8 +1506,7 @@ static enum act_parse_ret parse_http_set_header(const 
char **args, int *orig_arg
}
 
 
-   rule->arg.http.str.ptr = strdup(args[cur_arg]);
-   rule->arg.http.str.len = strlen(rule->arg.http.str.ptr);
+   rule->arg.http.str = ist(strdup(args[cur_arg]));
LIST_INIT(>arg.http.fmt);
 
if (rule->from == ACT_F_HTTP_REQ) {
@@ -1617,8 +1614,7 @@ static enum act_parse_ret parse_http_replace_header(const 
char **args, int *orig
return ACT_RET_PRS_ERR;
}
 
-   rule->arg.http.str.ptr = strdup(args[cur_arg]);
-   rule->arg.http.str.len = strlen(rule->arg.http.str.ptr);
+   rule->arg.http.str = ist(strdup(args[cur_arg]));
LIST_INIT(>arg.http.fmt);
 
cur_arg++;
@@ -1721,8 +1717,7 @@ static enum act_parse_ret parse_http_del_header(const 
char **args, int *orig_arg
return ACT_RET_PRS_ERR;
}
 
-   rule->arg.http.str.ptr = strdup(args[cur_arg]);
-   rule->arg.http.str.len = strlen(rule->arg.http.str.ptr);
+   rule->arg.http.str = ist(strdup(args[cur_arg]));
px->conf.args.ctx = (rule->from == ACT_F_HTTP_REQ ? ARGC_HRQ : 
ARGC_HRS);
 
LIST_INIT(>arg.http.fmt);
-- 
2.35.1




[PATCH 1/4] DEV: coccinelle: Fix incorrect replacement in ist.cocci

2022-03-15 Thread Tim Duesterhus
We must not use `ist2()` if the value of `i.len` is derived from the value of
`i.ptr`:

i.ptr = "foo";
i.len = strlen(i.ptr);
---
 dev/coccinelle/ist.cocci | 5 +
 1 file changed, 5 insertions(+)

diff --git a/dev/coccinelle/ist.cocci b/dev/coccinelle/ist.cocci
index 4945141b2..ea13d39d2 100644
--- a/dev/coccinelle/ist.cocci
+++ b/dev/coccinelle/ist.cocci
@@ -3,9 +3,14 @@ struct ist i;
 expression p, l;
 @@
 
+(
+ i.ptr = p;
+ i.len = strlen(i.ptr);
+|
 - i.ptr = p;
 - i.len = l;
 + i = ist2(p, l);
+)
 
 @@
 @@
-- 
2.35.1




[PATCH] REGTESTS: Do not use REQUIRE_VERSION for HAProxy 2.5+

2022-03-11 Thread Tim Duesterhus
Introduced in:

0657b9338 MINOR: stream: add "last_rule_file" and "last_rule_line" samples
---
 reg-tests/log/last_rule.vtc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/reg-tests/log/last_rule.vtc b/reg-tests/log/last_rule.vtc
index b57251912..e69516654 100644
--- a/reg-tests/log/last_rule.vtc
+++ b/reg-tests/log/last_rule.vtc
@@ -1,7 +1,7 @@
 varnishtest "Verify logging of last final rule"
-feature ignore_unknown_macro
 
-#REQUIRE_VERSION=2.6
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.6-dev0)'"
+feature ignore_unknown_macro
 
 server s1 {
 rxreq
-- 
2.35.1




[PATCH 6/6] CLEANUP: fcgi: Use `istadv()` in `fcgi_strm_send_params`

2022-03-04 Thread Tim Duesterhus
Found manually, while creating the previous commits to turn `struct proxy`
members into ists.

There is an existing Coccinelle rule to replace this pattern by `istadv()` in
`ist.cocci`:

@@
struct ist i;
expression e;
@@

- i.ptr += e;
- i.len -= e;
+ i = istadv(i, e);

But apparently it is not smart enough to match ists that are stored in another
struct. It would be useful to make the existing rule more generic, so that it
might catch similar cases in the future.
---
 src/mux_fcgi.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/mux_fcgi.c b/src/mux_fcgi.c
index 0a8679019..ef264edae 100644
--- a/src/mux_fcgi.c
+++ b/src/mux_fcgi.c
@@ -1980,8 +1980,7 @@ static size_t fcgi_strm_send_params(struct fcgi_conn 
*fconn, struct fcgi_strm *f
p.v = htx_get_blk_value(htx, blk);
 
if (istmatch(p.n, ist(":fcgi-"))) {
-   p.n.ptr += 6;
-   p.n.len -= 6;
+   p.n = istadv(p.n, 6);
if (isteq(p.n, 
ist("gateway_interface")))
params.mask |= 
FCGI_SP_CGI_GATEWAY;
else if (isteq(p.n, 
ist("document_root"))) {
-- 
2.35.1




[PATCH 5/6] CLEANUP: fcgi: Replace memcpy() on ist by istcat()

2022-03-04 Thread Tim Duesterhus
This is a little cleaner, because the length of the resulting string does not
need to be calculated manually.
---
 src/mux_fcgi.c | 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/src/mux_fcgi.c b/src/mux_fcgi.c
index a22bc9391..0a8679019 100644
--- a/src/mux_fcgi.c
+++ b/src/mux_fcgi.c
@@ -2030,6 +2030,8 @@ static size_t fcgi_strm_send_params(struct fcgi_conn 
*fconn, struct fcgi_strm *f
else if (isteq(p.n, ist("content-type")))
p.n = ist("CONTENT_TYPE");
else {
+   struct ist n;
+
if (isteq(p.n, ist("host")))
params.srv_name = p.v;
else if (isteq(p.n, ist("te"))) {
@@ -2046,9 +2048,10 @@ static size_t fcgi_strm_send_params(struct fcgi_conn 
*fconn, struct fcgi_strm *f
if 
(isttest(fconn->proxy->server_id_hdr_name) && isteq(p.n, 
fconn->proxy->server_id_hdr_name))
break;
 
-   memcpy(trash.area, "http_", 5);
-   memcpy(trash.area+5, p.n.ptr, p.n.len);
-   p.n = ist2(trash.area, p.n.len+5);
+   n = ist2(trash.area, 0);
+   istcat(, ist("http_"), trash.size);
+   istcat(, p.n, trash.size);
+   p.n = n;
}
 
if (!fcgi_encode_param(, )) {
-- 
2.35.1




[PATCH 0/6] 'ist'ify members of struct proxy

2022-03-04 Thread Tim Duesterhus
Willy,
Christopher,

find a series that converts a few members of `struct proxy` into ists. All
of them have already been converted into ists when operating on them, so
directly storing them as ists makes that code cleaner.

One drawback is that `struct proxy` grows by 16 bytes. It might or might not
be necessary to reorder the struct members to keep it efficient.

The `server_id_hdr_name` one is tagged MEDIUM, because that required some
non-trivial changes in the FCGI implementation. As I've needed to modify
that code anyway, I've also added two additional CLEANUP commits.

As for the second CLEANUP commit: If one of you knows how to fix the Coccinelle
patch to detect that specific pattern, I'd appreciate if you could make the
necessary changes to ist.cocci. Unfortunately my Coccinelle skills are not
good enough.

I've tested that HAProxy compiles and that the existing reg-tests pass, but
I didn't specifically test FCGI (and there are no existing reg-tests for that).
So please carefully check the patches for dumb mistakes.

Best regards

Tim Duesterhus (6):
  MINOR: proxy: Store monitor_uri as a `struct ist`
  MINOR: proxy: Store fwdfor_hdr_name as a `struct ist`
  MINOR: proxy: Store orgto_hdr_name as a `struct ist`
  MEDIUM: proxy: Store server_id_hdr_name as a `struct ist`
  CLEANUP: fcgi: Replace memcpy() on ist by istcat()
  CLEANUP: fcgi: Use `istadv()` in `fcgi_strm_send_params`

 include/haproxy/proxy-t.h | 12 --
 src/cfgparse-listen.c | 46 ---
 src/http_ana.c| 11 --
 src/mux_fcgi.c| 23 ++--
 src/mux_h1.c  |  8 +++
 src/mux_h2.c  |  8 +++
 src/proxy.c   | 37 +--
 7 files changed, 62 insertions(+), 83 deletions(-)

-- 
2.35.1




[PATCH 1/6] MINOR: proxy: Store monitor_uri as a `struct ist`

2022-03-04 Thread Tim Duesterhus
The monitor_uri is already processed as an ist in `http_wait_for_request`, let's
also just store it as such.

see 0643b0e7e ("MINOR: proxy: Make `header_unique_id` a `struct ist`") for a
very similar past commit.
---
 include/haproxy/proxy-t.h |  3 +--
 src/cfgparse-listen.c |  9 +++--
 src/http_ana.c|  5 ++---
 src/proxy.c   | 11 +--
 4 files changed, 11 insertions(+), 17 deletions(-)

diff --git a/include/haproxy/proxy-t.h b/include/haproxy/proxy-t.h
index 421f900e2..13e722fbf 100644
--- a/include/haproxy/proxy-t.h
+++ b/include/haproxy/proxy-t.h
@@ -322,8 +322,7 @@ struct proxy {
int srvtcpka_cnt;   /* The maximum number of 
keepalive probes TCP should send before dropping the connection. (server side) 
*/
int srvtcpka_idle;  /* The time (in seconds) the 
connection needs to remain idle before TCP starts sending keepalive probes. 
(server side) */
int srvtcpka_intvl; /* The time (in seconds) 
between individual keepalive probes. (server side) */
-   int monitor_uri_len;/* length of the string above. 
0 if unused */
-   char *monitor_uri;  /* a special URI to which we 
respond with HTTP/200 OK */
+   struct ist monitor_uri; /* a special URI to which we 
respond with HTTP/200 OK */
struct list mon_fail_cond;  /* list of conditions to fail 
monitoring requests (chained) */
struct {/* WARNING! check 
proxy_reset_timeouts() in proxy.h !!! */
int client; /* client I/O timeout (in 
ticks) */
diff --git a/src/cfgparse-listen.c b/src/cfgparse-listen.c
index 5deec5e6b..eb58b2eb1 100644
--- a/src/cfgparse-listen.c
+++ b/src/cfgparse-listen.c
@@ -575,13 +575,10 @@ int cfg_parse_listen(const char *file, int linenum, char 
**args, int kwm)
goto out;
}
 
-   free(curproxy->monitor_uri);
-   curproxy->monitor_uri_len = strlen(args[1]);
-   curproxy->monitor_uri = calloc(1, curproxy->monitor_uri_len + 
1);
-   if (!curproxy->monitor_uri)
+   istfree(>monitor_uri);
+   curproxy->monitor_uri = istdup(ist(args[1]));
+   if (!isttest(curproxy->monitor_uri))
goto alloc_error;
-   memcpy(curproxy->monitor_uri, args[1], 
curproxy->monitor_uri_len);
-   curproxy->monitor_uri[curproxy->monitor_uri_len] = '\0';
 
goto out;
}
diff --git a/src/http_ana.c b/src/http_ana.c
index f33eb7790..b60927e52 100644
--- a/src/http_ana.c
+++ b/src/http_ana.c
@@ -203,9 +203,8 @@ int http_wait_for_request(struct stream *s, struct channel 
*req, int an_bit)
 * used. It is a workaround to let HTTP/2 health-checks work as
 * expected.
 */
-   if (unlikely(sess->fe->monitor_uri_len != 0)) {
-   const struct ist monitor_uri = ist2(sess->fe->monitor_uri,
-   sess->fe->monitor_uri_len);
+   if (unlikely(isttest(sess->fe->monitor_uri))) {
+   const struct ist monitor_uri = sess->fe->monitor_uri;
struct http_uri_parser parser = 
http_uri_parser_init(htx_sl_req_uri(sl));
 
if ((istptr(monitor_uri)[0] == '/' &&
diff --git a/src/proxy.c b/src/proxy.c
index 946fe13d5..e5cf81327 100644
--- a/src/proxy.c
+++ b/src/proxy.c
@@ -156,7 +156,7 @@ void free_proxy(struct proxy *p)
free(p->lbprm.arg_str);
free(p->server_state_file_name);
free(p->capture_name);
-   free(p->monitor_uri);
+   istfree(>monitor_uri);
free(p->rdp_cookie_name);
free(p->invalid_rep);
free(p->invalid_req);
@@ -1270,7 +1270,7 @@ int proxy_cfg_ensure_no_http(struct proxy *curproxy)
ha_warning("cookie will be ignored for %s '%s' (needs 'mode 
http').\n",
   proxy_type_str(curproxy), curproxy->id);
}
-   if (curproxy->monitor_uri != NULL) {
+   if (isttest(curproxy->monitor_uri)) {
ha_warning("monitor-uri will be ignored for %s '%s' (needs 
'mode http').\n",
   proxy_type_str(curproxy), curproxy->id);
}
@@ -1432,7 +1432,7 @@ void proxy_free_defaults(struct proxy *defproxy)
ha_free(>cookie_attrs);
ha_free(>lbprm.arg_str);
ha_free(>capture_name);
-   ha_free(>monitor_uri);
+   istfree(>monitor_uri);
ha_free(>defbe.name);
ha_free(>conn_src.iface_name);
ha_free(>fwdfor_hdr_name); defproxy->fwdfor_hdr_len = 0;
@@ -1707,9 +1707,8 @@ static int proxy_defproxy_cpy(struct proxy *curproxy, 
const struct proxy *defpro
curproxy->timeout.tarpit = defproxy->timeout.tarpit;
curproxy->timeout.httpreq = defproxy->timeout.httpreq;

[PATCH 4/6] MEDIUM: proxy: Store server_id_hdr_name as a `struct ist`

2022-03-04 Thread Tim Duesterhus
The server_id_hdr_name is already processed as an ist in various locations, let's
also just store it as such.

see 0643b0e7e ("MINOR: proxy: Make `header_unique_id` a `struct ist`") for a
very similar past commit.
---
 include/haproxy/proxy-t.h |  3 +--
 src/cfgparse-listen.c |  9 -
 src/mux_fcgi.c| 11 +--
 src/mux_h1.c  |  8 
 src/mux_h2.c  |  8 
 src/proxy.c   |  8 +++-
 6 files changed, 21 insertions(+), 26 deletions(-)

diff --git a/include/haproxy/proxy-t.h b/include/haproxy/proxy-t.h
index 805e1b452..80431757e 100644
--- a/include/haproxy/proxy-t.h
+++ b/include/haproxy/proxy-t.h
@@ -354,8 +354,7 @@ struct proxy {
struct net_addr except_xot_net; /* don't x-original-to for this 
address. */
struct ist fwdfor_hdr_name; /* header to use - 
default: "x-forwarded-for" */
struct ist orgto_hdr_name;  /* header to use - 
default: "x-original-to" */
-   char *server_id_hdr_name;   /* the header to use to 
send the server id (name) */
-   int server_id_hdr_len;  /* the length of the id 
(name) header... name */
+   struct ist server_id_hdr_name;   /* the header to use 
to send the server id (name) */
int conn_retries;   /* maximum number of connect 
retries */
unsigned int retry_type;/* Type of retry allowed */
int redispatch_after;   /* number of retries before 
redispatch */
diff --git a/src/cfgparse-listen.c b/src/cfgparse-listen.c
index 121f1deac..216e6d8d5 100644
--- a/src/cfgparse-listen.c
+++ b/src/cfgparse-listen.c
@@ -1383,12 +1383,11 @@ int cfg_parse_listen(const char *file, int linenum, 
char **args, int kwm)
}
 
/* set the desired header name, in lower case */
-   free(curproxy->server_id_hdr_name);
-   curproxy->server_id_hdr_name = strdup(args[1]);
-   if (!curproxy->server_id_hdr_name)
+   istfree(>server_id_hdr_name);
+   curproxy->server_id_hdr_name = istdup(ist(args[1]));
+   if (!isttest(curproxy->server_id_hdr_name))
goto alloc_error;
-   curproxy->server_id_hdr_len  = 
strlen(curproxy->server_id_hdr_name);
-   ist2bin_lc(curproxy->server_id_hdr_name, 
ist2(curproxy->server_id_hdr_name, curproxy->server_id_hdr_len));
+   ist2bin_lc(istptr(curproxy->server_id_hdr_name), 
curproxy->server_id_hdr_name);
}
else if (strcmp(args[0], "block") == 0) {
ha_alert("parsing [%s:%d] : The '%s' directive is not supported 
anymore since HAProxy 2.1. Use 'http-request deny' which uses the exact same 
syntax.\n", file, linenum, args[0]);
diff --git a/src/mux_fcgi.c b/src/mux_fcgi.c
index b5b280749..a22bc9391 100644
--- a/src/mux_fcgi.c
+++ b/src/mux_fcgi.c
@@ -2043,8 +2043,7 @@ static size_t fcgi_strm_send_params(struct fcgi_conn 
*fconn, struct fcgi_strm *f
}
 
/* Skip header if same name is used to 
add the server name */
-   if (fconn->proxy->server_id_hdr_name &&
-   isteq(p.n, 
ist2(fconn->proxy->server_id_hdr_name, fconn->proxy->server_id_hdr_len)))
+   if 
(isttest(fconn->proxy->server_id_hdr_name) && isteq(p.n, 
fconn->proxy->server_id_hdr_name))
break;
 
memcpy(trash.area, "http_", 5);
@@ -2062,15 +2061,15 @@ static size_t fcgi_strm_send_params(struct fcgi_conn 
*fconn, struct fcgi_strm *f
break;
 
case HTX_BLK_EOH:
-   if (fconn->proxy->server_id_hdr_name) {
+   if (isttest(fconn->proxy->server_id_hdr_name)) {
struct server *srv = 
objt_server(fconn->conn->target);
 
if (!srv)
goto done;
 
-   memcpy(trash.area, "http_", 5);
-   memcpy(trash.area+5, 
fconn->proxy->server_id_hdr_name, fconn->proxy->server_id_hdr_len);
-   p.n = ist2(trash.area, 
fconn->proxy->server_id_hdr_len+5);
+   p.n = ist2(trash.area, 0);
+   istcat(, ist("http_"), trash.size);
+   istcat(, 
fconn->proxy->server_id_hdr_name, trash.size);
p.v = ist(srv->id);
 
if (!fcgi_encode_param(, )) {
diff --git a/src/mux_h1.c b/src/mux_h1.c
index 

[PATCH 3/6] MINOR: proxy: Store orgto_hdr_name as a `struct ist`

2022-03-04 Thread Tim Duesterhus
The orgto_hdr_name is already processed as an ist in `http_process_request`,
let's also just store it as such.

see 0643b0e7e ("MINOR: proxy: Make `header_unique_id` a `struct ist`") for a
very similar past commit.
---
 include/haproxy/proxy-t.h |  3 +--
 src/cfgparse-listen.c | 14 ++
 src/http_ana.c|  3 +--
 src/proxy.c   |  8 +++-
 4 files changed, 11 insertions(+), 17 deletions(-)

diff --git a/include/haproxy/proxy-t.h b/include/haproxy/proxy-t.h
index 8277c098e..805e1b452 100644
--- a/include/haproxy/proxy-t.h
+++ b/include/haproxy/proxy-t.h
@@ -353,8 +353,7 @@ struct proxy {
struct net_addr except_xff_net; /* don't x-forward-for for this 
address. */
struct net_addr except_xot_net; /* don't x-original-to for this 
address. */
struct ist fwdfor_hdr_name; /* header to use - 
default: "x-forwarded-for" */
-   char *orgto_hdr_name;   /* header to use - default: 
"x-original-to" */
-   int orgto_hdr_len;  /* length of "x-original-to" 
header */
+   struct ist orgto_hdr_name;  /* header to use - 
default: "x-original-to" */
char *server_id_hdr_name;   /* the header to use to 
send the server id (name) */
int server_id_hdr_len;  /* the length of the id 
(name) header... name */
int conn_retries;   /* maximum number of connect 
retries */
diff --git a/src/cfgparse-listen.c b/src/cfgparse-listen.c
index d858c3446..121f1deac 100644
--- a/src/cfgparse-listen.c
+++ b/src/cfgparse-listen.c
@@ -2399,11 +2399,10 @@ int cfg_parse_listen(const char *file, int linenum, 
char **args, int kwm)
 
curproxy->options |= PR_O_ORGTO;
 
-   free(curproxy->orgto_hdr_name);
-   curproxy->orgto_hdr_name = strdup(DEF_XORIGINALTO_HDR);
-   if (!curproxy->orgto_hdr_name)
+   istfree(>orgto_hdr_name);
+   curproxy->orgto_hdr_name = 
istdup(ist(DEF_XORIGINALTO_HDR));
+   if (!isttest(curproxy->orgto_hdr_name))
goto alloc_error;
-   curproxy->orgto_hdr_len  = strlen(DEF_XORIGINALTO_HDR);
curproxy->except_xot_net.family = AF_UNSPEC;
 
/* loop to go through arguments - start at 2, since 0+1 
= "option" "originalto" */
@@ -2441,11 +2440,10 @@ int cfg_parse_listen(const char *file, int linenum, 
char **args, int kwm)
err_code |= ERR_ALERT | 
ERR_FATAL;
goto out;
}
-   free(curproxy->orgto_hdr_name);
-   curproxy->orgto_hdr_name = 
strdup(args[cur_arg+1]);
-   if (!curproxy->orgto_hdr_name)
+   istfree(>orgto_hdr_name);
+   curproxy->orgto_hdr_name = 
istdup(ist(args[cur_arg+1]));
+   if (!isttest(curproxy->orgto_hdr_name))
goto alloc_error;
-   curproxy->orgto_hdr_len  = 
strlen(curproxy->orgto_hdr_name);
cur_arg += 2;
} else {
/* unknown suboption - catchall */
diff --git a/src/http_ana.c b/src/http_ana.c
index f02b8446b..83711482f 100644
--- a/src/http_ana.c
+++ b/src/http_ana.c
@@ -711,8 +711,7 @@ int http_process_request(struct stream *s, struct channel 
*req, int an_bit)
 */
if ((sess->fe->options | s->be->options) & PR_O_ORGTO) {
const struct sockaddr_storage *dst = si_dst(cs_si(s->csf));
-   struct ist hdr = ist2(s->be->orgto_hdr_len ? 
s->be->orgto_hdr_name : sess->fe->orgto_hdr_name,
- s->be->orgto_hdr_len ? 
s->be->orgto_hdr_len  : sess->fe->orgto_hdr_len);
+   struct ist hdr = isttest(s->be->orgto_hdr_name) ? 
s->be->orgto_hdr_name : sess->fe->orgto_hdr_name;
 
if (dst && dst->ss_family == AF_INET) {
/* Add an X-Original-To header unless the destination 
IP is
diff --git a/src/proxy.c b/src/proxy.c
index 53ca5db29..f051768ac 100644
--- a/src/proxy.c
+++ b/src/proxy.c
@@ -1436,7 +1436,7 @@ void proxy_free_defaults(struct proxy *defproxy)
ha_free(>defbe.name);
ha_free(>conn_src.iface_name);
istfree(>fwdfor_hdr_name);
-   ha_free(>orgto_hdr_name); defproxy->orgto_hdr_len = 0;
+   istfree(>orgto_hdr_name);
ha_free(>server_id_hdr_name); defproxy->server_id_hdr_len = 0;
 
list_for_each_entry_safe(acl, aclb, >acl, list) {
@@ -1600,10 

[PATCH 2/6] MINOR: proxy: Store fwdfor_hdr_name as a `struct ist`

2022-03-04 Thread Tim Duesterhus
The fwdfor_hdr_name is already processed as an ist in `http_process_request`,
let's also just store it as such.

see 0643b0e7e ("MINOR: proxy: Make `header_unique_id` a `struct ist`") for a
very similar past commit.
---
 include/haproxy/proxy-t.h |  3 +--
 src/cfgparse-listen.c | 14 ++
 src/http_ana.c|  3 +--
 src/proxy.c   | 10 --
 4 files changed, 12 insertions(+), 18 deletions(-)

diff --git a/include/haproxy/proxy-t.h b/include/haproxy/proxy-t.h
index 13e722fbf..8277c098e 100644
--- a/include/haproxy/proxy-t.h
+++ b/include/haproxy/proxy-t.h
@@ -352,9 +352,8 @@ struct proxy {
unsigned int tot_fe_maxconn;/* #maxconn of frontends linked 
to that backend, it is used to compute fullconn */
struct net_addr except_xff_net; /* don't x-forward-for for this 
address. */
struct net_addr except_xot_net; /* don't x-original-to for this 
address. */
-   char *fwdfor_hdr_name;  /* header to use - default: 
"x-forwarded-for" */
+   struct ist fwdfor_hdr_name; /* header to use - 
default: "x-forwarded-for" */
char *orgto_hdr_name;   /* header to use - default: 
"x-original-to" */
-   int fwdfor_hdr_len; /* length of "x-forwarded-for" 
header */
int orgto_hdr_len;  /* length of "x-original-to" 
header */
char *server_id_hdr_name;   /* the header to use to 
send the server id (name) */
int server_id_hdr_len;  /* the length of the id 
(name) header... name */
diff --git a/src/cfgparse-listen.c b/src/cfgparse-listen.c
index eb58b2eb1..d858c3446 100644
--- a/src/cfgparse-listen.c
+++ b/src/cfgparse-listen.c
@@ -2331,11 +2331,10 @@ int cfg_parse_listen(const char *file, int linenum, 
char **args, int kwm)
 
curproxy->options |= PR_O_FWDFOR | PR_O_FF_ALWAYS;
 
-   free(curproxy->fwdfor_hdr_name);
-   curproxy->fwdfor_hdr_name = strdup(DEF_XFORWARDFOR_HDR);
-   if (!curproxy->fwdfor_hdr_name)
+   istfree(>fwdfor_hdr_name);
+   curproxy->fwdfor_hdr_name = 
istdup(ist(DEF_XFORWARDFOR_HDR));
+   if (!isttest(curproxy->fwdfor_hdr_name))
goto alloc_error;
-   curproxy->fwdfor_hdr_len  = strlen(DEF_XFORWARDFOR_HDR);
curproxy->except_xff_net.family = AF_UNSPEC;
 
/* loop to go through arguments - start at 2, since 0+1 
= "option" "forwardfor" */
@@ -2374,11 +2373,10 @@ int cfg_parse_listen(const char *file, int linenum, 
char **args, int kwm)
err_code |= ERR_ALERT | 
ERR_FATAL;
goto out;
}
-   free(curproxy->fwdfor_hdr_name);
-   curproxy->fwdfor_hdr_name = 
strdup(args[cur_arg+1]);
-   if (!curproxy->fwdfor_hdr_name)
+   istfree(>fwdfor_hdr_name);
+   curproxy->fwdfor_hdr_name = 
istdup(ist(args[cur_arg+1]));
+   if (!isttest(curproxy->fwdfor_hdr_name))
goto alloc_error;
-   curproxy->fwdfor_hdr_len  = 
strlen(curproxy->fwdfor_hdr_name);
cur_arg += 2;
} else if (strcmp(args[cur_arg], "if-none") == 
0) {
curproxy->options &= ~PR_O_FF_ALWAYS;
diff --git a/src/http_ana.c b/src/http_ana.c
index b60927e52..f02b8446b 100644
--- a/src/http_ana.c
+++ b/src/http_ana.c
@@ -655,8 +655,7 @@ int http_process_request(struct stream *s, struct channel 
*req, int an_bit)
if ((sess->fe->options | s->be->options) & PR_O_FWDFOR) {
const struct sockaddr_storage *src = si_src(cs_si(s->csf));
struct http_hdr_ctx ctx = { .blk = NULL };
-   struct ist hdr = ist2(s->be->fwdfor_hdr_len ? 
s->be->fwdfor_hdr_name : sess->fe->fwdfor_hdr_name,
- s->be->fwdfor_hdr_len ? 
s->be->fwdfor_hdr_len : sess->fe->fwdfor_hdr_len);
+   struct ist hdr = isttest(s->be->fwdfor_hdr_name) ? 
s->be->fwdfor_hdr_name : sess->fe->fwdfor_hdr_name;
 
if (!((sess->fe->options | s->be->options) & PR_O_FF_ALWAYS) &&
http_find_header(htx, hdr, , 0)) {
diff --git a/src/proxy.c b/src/proxy.c
index e5cf81327..53ca5db29 100644
--- a/src/proxy.c
+++ b/src/proxy.c
@@ -332,7 +332,7 @@ void free_proxy(struct proxy *p)
pxdf->fct(p);
 
free(p->desc);
-   free(p->fwdfor_hdr_name);
+   

[PATCH] MINOR: queue: Replace if() + abort() with BUG_ON()

2022-02-28 Thread Tim Duesterhus
see 5cd4bbd7a ("BUG/MAJOR: threads/queue: Fix thread-safety issues on the 
queues management")
---
 src/queue.c | 15 +++
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/src/queue.c b/src/queue.c
index b1be766b9..002b94b85 100644
--- a/src/queue.c
+++ b/src/queue.c
@@ -567,14 +567,13 @@ int pendconn_dequeue(struct stream *strm)
struct pendconn *p;
int is_unlinked;
 
-   if (unlikely(!strm->pend_pos)) {
-   /* unexpected case because it is called by the stream itself and
-* only the stream can release a pendconn. So it is only
-* possible if a pendconn is released by someone else or if the
-* stream is supposed to be queued but without its associated
-* pendconn. In both cases it is a bug! */
-   abort();
-   }
+   /* unexpected case because it is called by the stream itself and
+* only the stream can release a pendconn. So it is only
+* possible if a pendconn is released by someone else or if the
+* stream is supposed to be queued but without its associated
+* pendconn. In both cases it is a bug! */
+   BUG_ON(!strm->pend_pos);
+
p = strm->pend_pos;
 
/* note below : we need to grab the queue's lock to check for emptiness
-- 
2.35.1




[PATCH 2/2] MINOR: connection: Transform safety check in PROXYv2 parsing into BUG_ON()

2022-02-25 Thread Tim Duesterhus
With BUG_ON() being enabled by default it is more useful to use a BUG_ON()
instead of an effectively never-taken if, as any incorrect assumptions will
become much more visible.

see 488ee7fb6e4a388bb68153341826a6391da794e9
---
 src/connection.c | 9 -
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/src/connection.c b/src/connection.c
index f78028451..c156d9313 100644
--- a/src/connection.c
+++ b/src/connection.c
@@ -1098,12 +1098,11 @@ int conn_recv_proxy(struct connection *conn, int flag)
}
 
/* Verify that the PROXYv2 header ends at a TLV boundary.
-* This is technically unreachable, because the TLV parsing 
already
-* verifies that a TLV does not exceed the total length and also
-* that there is space for a TLV header.
+* This cannot be true, because the TLV parsing already
+* verifies that a TLV does not exceed the total length and
+* also that there is space for a TLV header.
 */
-   if (tlv_offset != total_v2_len)
-   goto bad_header;
+   BUG_ON(tlv_offset != total_v2_len);
 
/* unsupported protocol, keep local connection address */
break;
-- 
2.35.1




[PATCH 1/2] CLEANUP: connection: Indicate unreachability to the compiler in conn_recv_proxy

2022-02-25 Thread Tim Duesterhus
Transform the unreachability comment into a call to `my_unreachable()` to allow
the compiler to benefit from it.

see d1b15b6e9b4d4d378a6169929a86f25b95eafc57
see 615f81eb5ad3e8c691901db8ce3e6a4a6b6efa49
---
 src/connection.c | 10 --
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/src/connection.c b/src/connection.c
index 1d53eed79..f78028451 100644
--- a/src/connection.c
+++ b/src/connection.c
@@ -1073,9 +1073,8 @@ int conn_recv_proxy(struct connection *conn, int flag)
if (!isttest(conn->proxy_authority))
goto fail;
if (istcpy(>proxy_authority, tlv, 
PP2_AUTHORITY_MAX) < 0) {
-   /* This is technically unreachable, 
because we verified above
-* that the TLV value fits.
-*/
+   /* This is impossible, because we 
verified that the TLV value fits. */
+   my_unreachable();
goto fail;
}
break;
@@ -1087,9 +1086,8 @@ int conn_recv_proxy(struct connection *conn, int flag)
if (!isttest(conn->proxy_unique_id))
goto fail;
if (istcpy(>proxy_unique_id, tlv, 
UNIQUEID_LEN) < 0) {
-   /* This is technically unreachable, 
because we verified above
-* that the TLV value fits.
-*/
+   /* This is impossible, because we 
verified that the TLV value fits. */
+   my_unreachable();
goto fail;
}
break;
-- 
2.35.1




[PATCH] CI: Consistently use actions/checkout@v2

2022-01-28 Thread Tim Duesterhus
v2 is the current version of the checkout action and faster than v1.
---
 .github/workflows/compliance.yml   | 2 +-
 .github/workflows/musl.yml | 2 +-
 .github/workflows/openssl-nodeprecated.yml | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/compliance.yml b/.github/workflows/compliance.yml
index 3ce717805..75f3cdab0 100644
--- a/.github/workflows/compliance.yml
+++ b/.github/workflows/compliance.yml
@@ -21,7 +21,7 @@ jobs:
 env:
   H2SPEC_VERSION: '2.6.0'
 steps:
-- uses: actions/checkout@v1
+- uses: actions/checkout@v2
 - name: Install h2spec
   run: |
 curl -fsSL 
https://github.com/summerwind/h2spec/releases/download/v${H2SPEC_VERSION}/h2spec_linux_amd64.tar.gz
 -o h2spec.tar.gz
diff --git a/.github/workflows/musl.yml b/.github/workflows/musl.yml
index c106b1d05..aba3f4e14 100644
--- a/.github/workflows/musl.yml
+++ b/.github/workflows/musl.yml
@@ -13,7 +13,7 @@ jobs:
   container:
 image: alpine:latest
   steps:
-  - uses: actions/checkout@master
+  - uses: actions/checkout@v2
   - name: Install dependencies
 run: apk add gcc make tar git python3 libc-dev linux-headers pcre-dev 
pcre2-dev openssl-dev lua5.3-dev grep socat curl
   - name: Install VTest
diff --git a/.github/workflows/openssl-nodeprecated.yml 
b/.github/workflows/openssl-nodeprecated.yml
index e62dbf0d8..e423f58dd 100644
--- a/.github/workflows/openssl-nodeprecated.yml
+++ b/.github/workflows/openssl-nodeprecated.yml
@@ -21,7 +21,7 @@ jobs:
   test:
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@v1
+- uses: actions/checkout@v2
 - name: Install VTest
   run: |
 scripts/build-vtest.sh
-- 
2.35.0




[PATCH] REGTESTS: Remove REQUIRE_VERSION=1.8 from all tests

2022-01-28 Thread Tim Duesterhus
HAProxy 1.8 is the lowest supported version, thus this always matches.

see 1b095cac9468d0c3eeb157e9b1a2947487bd3c83
---
 reg-tests/checks/agent-check.vtc  | 1 -
 reg-tests/seamless-reload/abns_socket.vtc | 1 -
 reg-tests/server/cli_set_fdqn.vtc | 1 -
 3 files changed, 3 deletions(-)

diff --git a/reg-tests/checks/agent-check.vtc b/reg-tests/checks/agent-check.vtc
index 2744a6232..5cf51c658 100644
--- a/reg-tests/checks/agent-check.vtc
+++ b/reg-tests/checks/agent-check.vtc
@@ -1,5 +1,4 @@
 varnishtest "Health-checks: agent-check"
-#REQUIRE_VERSION=1.8
 #REGTEST_TYPE=slow
 feature ignore_unknown_macro
 
diff --git a/reg-tests/seamless-reload/abns_socket.vtc 
b/reg-tests/seamless-reload/abns_socket.vtc
index 2ea8dd387..ce1b156c0 100644
--- a/reg-tests/seamless-reload/abns_socket.vtc
+++ b/reg-tests/seamless-reload/abns_socket.vtc
@@ -19,7 +19,6 @@ feature ignore_unknown_macro
 
 # abns@ sockets are not available on freebsd
 #EXCLUDE_TARGETS=freebsd,osx,generic
-#REQUIRE_VERSION=1.8
 #REGTEST_TYPE=broken
 
 haproxy h1 -W -conf {
diff --git a/reg-tests/server/cli_set_fdqn.vtc 
b/reg-tests/server/cli_set_fdqn.vtc
index 3055f7677..86f32b6c8 100644
--- a/reg-tests/server/cli_set_fdqn.vtc
+++ b/reg-tests/server/cli_set_fdqn.vtc
@@ -3,7 +3,6 @@ varnishtest "Set server FQDN via CLI crash"
 feature ignore_unknown_macro
 
 # for "set server  fqdn"
-#REQUIRE_VERSION=1.8
 #REGTEST_TYPE=bug
 
 # Do nothing. Is there only to create s1_* macros
-- 
2.35.0




[PATCH] BUG/MEDIUM: sample: Fix memory leak in sample_conv_jwt_member_query

2021-12-01 Thread Tim Duesterhus
The function leaked one full buffer per invocation. Fix this by simply removing
the call to alloc_trash_chunk(), the static chunk from get_trash_chunk() is
sufficient.

This bug was introduced in 0a72f5ee7c2a61bdb379436461269315c776b50a, which is
2.5-dev10. This fix needs to be backported to 2.5+.
---
 src/sample.c | 4 
 1 file changed, 4 deletions(-)

diff --git a/src/sample.c b/src/sample.c
index 5abf4712a..63816be5d 100644
--- a/src/sample.c
+++ b/src/sample.c
@@ -3584,10 +3584,6 @@ static int sample_conv_jwt_member_query(const struct arg 
*args, struct sample *s
if (item_num < member + 1)
goto end;
 
-   decoded_header = alloc_trash_chunk();
-   if (!decoded_header)
-   goto end;
-
ret = base64urldec(items[member].start, items[member].length,
   decoded_header->area, decoded_header->size);
if (ret == -1)
-- 
2.34.1




[PATCH] CLEANUP: Wrap `accept4_broken = 1` into additional parenthesis

2021-11-20 Thread Tim Duesterhus
This makes it clear to static analysis tools that this assignment is
intentional and not a mistyped comparison.
---
 src/sock.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/sock.c b/src/sock.c
index e3d4a6e4c..f11c5b0c4 100644
--- a/src/sock.c
+++ b/src/sock.c
@@ -74,7 +74,7 @@ struct connection *sock_accept_conn(struct listener *l, int 
*status)
(((cfd = accept4(l->rx.fd, (struct sockaddr*)addr, &laddr,
 SOCK_NONBLOCK | (master ? SOCK_CLOEXEC : 0))) == 
-1) &&
 (errno == ENOSYS || errno == EINVAL || errno == EBADF) &&
-(accept4_broken = 1)))
+((accept4_broken = 1))))
 #endif
{
laddr = sizeof(*conn->src);
-- 
2.34.0




[PATCH 5/6] CLEANUP: Apply ist.cocci

2021-11-08 Thread Tim Duesterhus
This is to make use of `chunk_istcat()`.
---
 src/cache.c  |  2 +-
 src/http_fetch.c |  2 +-
 src/http_htx.c   |  4 ++--
 src/mux_fcgi.c   | 10 +-
 src/tcpcheck.c   |  4 ++--
 5 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/src/cache.c b/src/cache.c
index e871a7b30..ee42947c1 100644
--- a/src/cache.c
+++ b/src/cache.c
@@ -1644,7 +1644,7 @@ int sha1_hosturi(struct stream *s)
chunk_istcat(trash, ctx.value);
}
 
-   chunk_memcat(trash, uri.ptr, uri.len);
+   chunk_istcat(trash, uri);
 
/* hash everything */
blk_SHA1_Init(&sha1_ctx);
diff --git a/src/http_fetch.c b/src/http_fetch.c
index aa4965d0f..99dc89a51 100644
--- a/src/http_fetch.c
+++ b/src/http_fetch.c
@@ -874,7 +874,7 @@ static int smp_fetch_hdr_names(const struct arg *args, 
struct sample *smp, const
 
if (temp->data)
temp->area[temp->data++] = del;
-   chunk_memcat(temp, n.ptr, n.len);
+   chunk_istcat(temp, n);
}
 
smp->data.type = SMP_T_STR;
diff --git a/src/http_htx.c b/src/http_htx.c
index 6b06336e6..3535fa713 100644
--- a/src/http_htx.c
+++ b/src/http_htx.c
@@ -432,7 +432,7 @@ int http_replace_req_path(struct htx *htx, const struct ist 
path, int with_qs)
vsn = ist2(temp->area + meth.len, HTX_SL_REQ_VLEN(sl));
 
chunk_memcat(temp, uri.ptr, p.ptr - uri.ptr); /* uri: host part 
*/
-   chunk_memcat(temp, path.ptr, path.len);   /* uri: new path 
*/
+   chunk_istcat(temp, path); /* uri: new path 
*/
chunk_memcat(temp, p.ptr + plen, p.len - plen);   /* uri: QS part */
uri = ist2(temp->area + meth.len + vsn.len, uri.len - plen + path.len);
 
@@ -711,7 +711,7 @@ int http_update_authority(struct htx *htx, struct htx_sl 
*sl, const struct ist h
vsn = ist2(temp->area + meth.len, HTX_SL_REQ_VLEN(sl));
 
chunk_memcat(temp, uri.ptr, authority.ptr - uri.ptr);
-   chunk_memcat(temp, host.ptr, host.len);
+   chunk_istcat(temp, host);
chunk_memcat(temp, istend(authority), istend(uri) - istend(authority));
uri = ist2(temp->area + meth.len + vsn.len, host.len + uri.len - 
authority.len); /* uri */
 
diff --git a/src/mux_fcgi.c b/src/mux_fcgi.c
index f20b46b71..ba3a54617 100644
--- a/src/mux_fcgi.c
+++ b/src/mux_fcgi.c
@@ -1242,17 +1242,17 @@ static int fcgi_set_default_param(struct fcgi_conn 
*fconn, struct fcgi_strm *fst
if (!(params->mask & FCGI_SP_REQ_METH)) {
p  = htx_sl_req_meth(sl);
params->meth = ist2(b_tail(params->p), p.len);
-   chunk_memcat(params->p, p.ptr, p.len);
+   chunk_istcat(params->p, p);
}
if (!(params->mask & FCGI_SP_REQ_URI)) {
p = h1_get_uri(sl);
params->uri = ist2(b_tail(params->p), p.len);
-   chunk_memcat(params->p, p.ptr, p.len);
+   chunk_istcat(params->p, p);
}
if (!(params->mask & FCGI_SP_SRV_PROTO)) {
p  = htx_sl_req_vsn(sl);
params->vsn = ist2(b_tail(params->p), p.len);
-   chunk_memcat(params->p, p.ptr, p.len);
+   chunk_istcat(params->p, p);
}
if (!(params->mask & FCGI_SP_SRV_PORT)) {
char *end;
@@ -1361,7 +1361,7 @@ static int fcgi_set_default_param(struct fcgi_conn 
*fconn, struct fcgi_strm *fst
/* Decode the path. it must first be copied to keep the URI
 * untouched.
 */
-   chunk_memcat(params->p, path.ptr, path.len);
+   chunk_istcat(params->p, path);
path.ptr = b_tail(params->p) - path.len;
len = url_decode(ist0(path), 0);
if (len < 0)
@@ -1415,7 +1415,7 @@ static int fcgi_set_default_param(struct fcgi_conn 
*fconn, struct fcgi_strm *fst
struct ist sn = params->scriptname;
 
params->scriptname = ist2(b_tail(params->p), 
len+fconn->app->index.len);
-   chunk_memcat(params->p, sn.ptr, sn.len);
+   chunk_istcat(params->p, sn);
chunk_memcat(params->p, fconn->app->index.ptr, 
fconn->app->index.len);
}
}
diff --git a/src/tcpcheck.c b/src/tcpcheck.c
index bf497fec5..294e49bcc 100644
--- a/src/tcpcheck.c
+++ b/src/tcpcheck.c
@@ -429,7 +429,7 @@ static void tcpcheck_expect_onerror_message(struct buffer 
*msg, struct check *ch
 * 4. Otherwise produce the generic tcp-check info message
 */
if (istlen(info)) {
-   chunk_strncat(msg, istptr(info), istlen(info));
+   chunk_istcat(msg, info);
goto comment;
}
else if (!LIST_ISEMPTY(&rule->expect.onerror_fmt)) {
@@ -517,7 +517,7 @@ static void tcpcheck_expect_onsuccess_message(struct buffer 
*msg, struct check *
 * 4. Otherwise produce 

[PATCH 6/6] CLEANUP: chunk: Remove duplicated chunk_Xcat implementation

2021-11-08 Thread Tim Duesterhus
Delegate chunk_istcat, chunk_cat and chunk_strncat to the most generic
chunk_memcat.
---
 include/haproxy/chunk.h | 41 +
 1 file changed, 13 insertions(+), 28 deletions(-)

diff --git a/include/haproxy/chunk.h b/include/haproxy/chunk.h
index af9ef816b..05fd16121 100644
--- a/include/haproxy/chunk.h
+++ b/include/haproxy/chunk.h
@@ -107,28 +107,6 @@ static inline int chunk_cpy(struct buffer *chk, const 
struct buffer *src)
return 1;
 }
 
-/* appends chunk  after . Returns 0 in case of failure. */
-static inline int chunk_cat(struct buffer *chk, const struct buffer *src)
-{
-   if (unlikely(chk->data + src->data > chk->size))
-   return 0;
-
-   memcpy(chk->area + chk->data, src->area, src->data);
-   chk->data += src->data;
-   return 1;
-}
-
-/* appends ist  after . Returns 0 in case of failure. */
-static inline int chunk_istcat(struct buffer *chk, const struct ist src)
-{
-   if (unlikely(chk->data + src.len > chk->size))
-   return 0;
-
-   memcpy(chk->area + chk->data, src.ptr, src.len);
-   chk->data += src.len;
-   return 1;
-}
-
 /* copies memory area  into  for  bytes. Returns 0 in
  * case of failure. No trailing zero is added.
  */
@@ -158,6 +136,18 @@ static inline int chunk_memcat(struct buffer *chk, const 
char *src,
return 1;
 }
 
+/* appends ist  after . Returns 0 in case of failure. */
+static inline int chunk_istcat(struct buffer *chk, const struct ist src)
+{
+   return chunk_memcat(chk, istptr(src), istlen(src));
+}
+
+/* appends chunk  after . Returns 0 in case of failure. */
+static inline int chunk_cat(struct buffer *chk, const struct buffer *src)
+{
+   return chunk_memcat(chk, src->area, src->data);
+}
+
 /* copies str into  followed by a trailing zero. Returns 0 in
  * case of failure.
  */
@@ -218,12 +208,7 @@ static inline int chunk_strcat(struct buffer *chk, const 
char *str)
  */
 static inline int chunk_strncat(struct buffer *chk, const char *str, int nb)
 {
-   if (unlikely(chk->data + nb >= chk->size))
-   return 0;
-
-   memcpy(chk->area + chk->data, str, nb);
-   chk->data += nb;
-   return 1;
+   return chunk_memcat(chk, str, nb);
 }
 
 /* Adds a trailing zero to the current chunk and returns the pointer to the
-- 
2.33.1




[PATCH 0/6] Probably final Coccinelle Cleanup

2021-11-08 Thread Tim Duesterhus
Hi Willy,

find my (probably :-) ) final CLEANUP series for 2.5.

Regarding the final patch:

'chunk_strncat()' appears to be completely redundant, it simply passes through
the arguments and even takes an int instead of a size_t. Should it be removed?

Best regards
Tim Düsterhus

Tim Duesterhus (6):
  DEV: coccinelle: Add rule to use `isttrim()` where possible
  CLEANUP: Apply ist.cocci
  DEV: coccinelle: Add rule to use `chunk_istcat()` instead of
`chunk_memcat()`
  DEV: coccinelle: Add rule to use `chunk_istcat()` instead of
`chunk_strncat()`
  CLEANUP: Apply ist.cocci
  CLEANUP: chunk: Remove duplicated chunk_Xcat implementation

 dev/coccinelle/ist.cocci | 24 +++
 include/haproxy/chunk.h  | 41 +---
 src/cache.c  |  5 ++---
 src/flt_trace.c  |  3 +--
 src/hlua.c   |  6 ++
 src/http_ana.c   |  3 +--
 src/http_fetch.c |  2 +-
 src/http_htx.c   |  4 ++--
 src/log.c|  6 ++
 src/mux_fcgi.c   | 10 +-
 src/tcpcheck.c   |  4 ++--
 11 files changed, 55 insertions(+), 53 deletions(-)

-- 
2.33.1




[PATCH 1/6] DEV: coccinelle: Add rule to use `isttrim()` where possible

2021-11-08 Thread Tim Duesterhus
This replaces `if (i.len > e) i.len = e;` by `isttrim(i, e)`.
---
 dev/coccinelle/ist.cocci | 8 
 1 file changed, 8 insertions(+)

diff --git a/dev/coccinelle/ist.cocci b/dev/coccinelle/ist.cocci
index 5b6aa6b2c..7e9a6ac05 100644
--- a/dev/coccinelle/ist.cocci
+++ b/dev/coccinelle/ist.cocci
@@ -44,6 +44,14 @@ struct ist i;
 - (\(i.ptr\|istptr(i)\) + \(i.len\|istlen(i)\))
 + istend(i)
 
+@@
+struct ist i;
+expression e;
+@@
+
+- if (\(i.len\|istlen(i)\) > e) { i.len = e; }
++ i = isttrim(i, e);
+
 @@
 struct ist i;
 @@
-- 
2.33.1




[PATCH 3/6] DEV: coccinelle: Add rule to use `chunk_istcat()` instead of `chunk_memcat()`

2021-11-08 Thread Tim Duesterhus
This replaces `chunk_memcat()` with `chunk_istcat()` if the parameters are the
ist's `.ptr` and `.len`.
---
 dev/coccinelle/ist.cocci | 8 
 1 file changed, 8 insertions(+)

diff --git a/dev/coccinelle/ist.cocci b/dev/coccinelle/ist.cocci
index 7e9a6ac05..4945141b2 100644
--- a/dev/coccinelle/ist.cocci
+++ b/dev/coccinelle/ist.cocci
@@ -52,6 +52,14 @@ expression e;
 - if (\(i.len\|istlen(i)\) > e) { i.len = e; }
 + i = isttrim(i, e);
 
+@@
+struct ist i;
+struct buffer *b;
+@@
+
+- chunk_memcat(b, \(i.ptr\|istptr(i)\) , \(i.len\|istlen(i)\));
++ chunk_istcat(b, i);
+
 @@
 struct ist i;
 @@
-- 
2.33.1




[PATCH 2/6] CLEANUP: Apply ist.cocci

2021-11-08 Thread Tim Duesterhus
Make use of the new rules to use `isttrim()`.
---
 src/cache.c | 3 +--
 src/flt_trace.c | 3 +--
 src/hlua.c  | 6 ++
 src/http_ana.c  | 3 +--
 src/log.c   | 6 ++
 5 files changed, 7 insertions(+), 14 deletions(-)

diff --git a/src/cache.c b/src/cache.c
index ba2b63c49..e871a7b30 100644
--- a/src/cache.c
+++ b/src/cache.c
@@ -622,8 +622,7 @@ cache_store_http_payload(struct stream *s, struct filter 
*filter, struct http_ms
case HTX_BLK_DATA:
v = htx_get_blk_value(htx, blk);
v = istadv(v, offset);
-   if (v.len > len)
-   v.len = len;
+   v = isttrim(v, len);
 
info = (type << 28) + v.len;
chunk_memcat(&trash, (char *)&info, 
sizeof(info));
diff --git a/src/flt_trace.c b/src/flt_trace.c
index b3efea6f9..5aabcb2b0 100644
--- a/src/flt_trace.c
+++ b/src/flt_trace.c
@@ -146,8 +146,7 @@ trace_htx_hexdump(struct htx *htx, unsigned int offset, 
unsigned int len)
v = istadv(v, offset);
offset = 0;
 
-   if (v.len > len)
-   v.len = len;
+   v = isttrim(v, len);
len -= v.len;
if (type == HTX_BLK_DATA)
trace_hexdump(v);
diff --git a/src/hlua.c b/src/hlua.c
index c2e56b3b9..94f656234 100644
--- a/src/hlua.c
+++ b/src/hlua.c
@@ -6329,8 +6329,7 @@ static int _hlua_http_msg_dup(struct http_msg *msg, 
lua_State *L, size_t offset,
case HTX_BLK_DATA:
v = htx_get_blk_value(htx, blk);
v = istadv(v, offset);
-   if (v.len > len)
-   v.len = len;
+   v = isttrim(v, len);
 
luaL_addlstring(&b, v.ptr, v.len);
ret += v.len;
@@ -6431,8 +6430,7 @@ static void _hlua_http_msg_delete(struct http_msg *msg, 
struct filter *filter, s
goto end;
v = htx_get_blk_value(htx, blk);
v.ptr += htxret.ret;
-   if (v.len > len)
-   v.len  = len;
+   v = isttrim(v, len);
blk = htx_replace_blk_value(htx, blk, v, IST_NULL);
len -= v.len;
ret += v.len;
diff --git a/src/http_ana.c b/src/http_ana.c
index 9d11284a5..c037261cf 100644
--- a/src/http_ana.c
+++ b/src/http_ana.c
@@ -4912,8 +4912,7 @@ static void http_capture_headers(struct htx *htx, char 
**cap, struct cap_hdr *ca
}
 
v = htx_get_blk_value(htx, blk);
-   if (v.len > h->len)
-   v.len = h->len;
+   v = isttrim(v, h->len);
 
memcpy(cap[h->index], v.ptr, v.len);
cap[h->index][v.len]=0;
diff --git a/src/log.c b/src/log.c
index 81bf97b34..e7607c2c4 100644
--- a/src/log.c
+++ b/src/log.c
@@ -1665,8 +1665,7 @@ static inline void __do_send_log(struct logsrv *logsrv, 
int nblogger, int level,
struct ist msg;
 
msg = ist2(message, size);
-   if (msg.len > logsrv->maxlen)
-   msg.len = logsrv->maxlen;
+   msg = isttrim(msg, logsrv->maxlen);
 
sent = sink_write(logsrv->sink, &msg, 1, level, 
logsrv->facility, metadata);
}
@@ -1674,8 +1673,7 @@ static inline void __do_send_log(struct logsrv *logsrv, 
int nblogger, int level,
struct ist msg;
 
msg = ist2(message, size);
-   if (msg.len > logsrv->maxlen)
-   msg.len = logsrv->maxlen;
+   msg = isttrim(msg, logsrv->maxlen);
 
sent = fd_write_frag_line(*plogfd, logsrv->maxlen, msg_header, 
nbelem, &msg, 1, 1);
}
-- 
2.33.1




[PATCH 4/6] DEV: coccinelle: Add rule to use `chunk_istcat()` instead of `chunk_strncat()`

2021-11-08 Thread Tim Duesterhus
This replaces `chunk_strncat()` with `chunk_istcat()` if the parameters are the
ist's `.ptr` and `.len`.
---
 dev/coccinelle/ist.cocci | 8 
 1 file changed, 8 insertions(+)

diff --git a/dev/coccinelle/ist.cocci b/dev/coccinelle/ist.cocci
index 4945141b2..680afbade 100644
--- a/dev/coccinelle/ist.cocci
+++ b/dev/coccinelle/ist.cocci
@@ -60,6 +60,14 @@ struct buffer *b;
 - chunk_memcat(b, \(i.ptr\|istptr(i)\) , \(i.len\|istlen(i)\));
 + chunk_istcat(b, i);
 
+@@
+struct ist i;
+struct buffer *b;
+@@
+
+- chunk_strncat(b, \(i.ptr\|istptr(i)\) , \(i.len\|istlen(i)\));
++ chunk_istcat(b, i);
+
 @@
 struct ist i;
 @@
-- 
2.33.1




[PATCH 4/4] CLEANUP: Re-apply xalloc_size.cocci

2021-11-06 Thread Tim Duesterhus
Use a consistent size as the parameter for the *alloc family.
---
 src/ev_evports.c | 2 +-
 src/hlua.c   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/ev_evports.c b/src/ev_evports.c
index 710d51236..73e97517c 100644
--- a/src/ev_evports.c
+++ b/src/ev_evports.c
@@ -293,7 +293,7 @@ static int init_evports_per_thread()
int fd;
 
evports_evlist_max = global.tune.maxpollevents;
-   evports_evlist = calloc(evports_evlist_max, sizeof (port_event_t));
+   evports_evlist = calloc(evports_evlist_max, sizeof(*evports_evlist));
if (evports_evlist == NULL) {
goto fail_alloc;
}
diff --git a/src/hlua.c b/src/hlua.c
index 086a8e0be..c2e56b3b9 100644
--- a/src/hlua.c
+++ b/src/hlua.c
@@ -7089,7 +7089,7 @@ struct http_hdr *hlua_httpclient_table_to_hdrs(lua_State 
*L)
 
if (hdr_num) {
/* alloc and copy the headers in the httpclient struct */
-   result = calloc((hdr_num + 1), sizeof(*hdrs));
+   result = calloc((hdr_num + 1), sizeof(*result));
if (!result)
goto skip_headers;
memcpy(result, hdrs, sizeof(struct http_hdr) * (hdr_num + 1));
-- 
2.33.1




[PATCH 1/4] DEV: coccinelle: Remove unused `expression e`

2021-11-06 Thread Tim Duesterhus
Introduced in ef00c533e1ed37b414aab912f492be794ab589cc.
---
 dev/coccinelle/ist.cocci | 1 -
 1 file changed, 1 deletion(-)

diff --git a/dev/coccinelle/ist.cocci b/dev/coccinelle/ist.cocci
index 97ce0a2ad..598ffa3e2 100644
--- a/dev/coccinelle/ist.cocci
+++ b/dev/coccinelle/ist.cocci
@@ -31,7 +31,6 @@ struct ist i;
 
 @@
 struct ist i;
-expression e;
 @@
 
 - i.ptr++;
-- 
2.33.1




[PATCH 3/4] CLEANUP: Apply ist.cocci

2021-11-06 Thread Tim Duesterhus
Make use of the new rules to use `istend()`.
---
 src/h1.c   |  4 ++--
 src/h2.c   |  2 +-
 src/hlua.c |  2 +-
 src/http_htx.c | 11 ++-
 src/htx.c  | 11 +++
 src/tcpcheck.c |  3 ++-
 6 files changed, 19 insertions(+), 14 deletions(-)

diff --git a/src/h1.c b/src/h1.c
index e0ba8d768..99b9c2993 100644
--- a/src/h1.c
+++ b/src/h1.c
@@ -110,7 +110,7 @@ int h1_parse_xfer_enc_header(struct h1m *h1m, struct ist 
value)
h1m->flags |= H1_MF_XFER_ENC;
 
word.ptr = value.ptr - 1; // -1 for next loop's pre-increment
-   e = value.ptr + value.len;
+   e = istend(value);
 
while (++word.ptr < e) {
/* skip leading delimiter and blanks */
@@ -229,7 +229,7 @@ void h1_parse_upgrade_header(struct h1m *h1m, struct ist 
value)
h1m->flags &= ~H1_MF_UPG_WEBSOCKET;
 
word.ptr = value.ptr - 1; // -1 for next loop's pre-increment
-   e = value.ptr + value.len;
+   e = istend(value);
 
while (++word.ptr < e) {
/* skip leading delimiter and blanks */
diff --git a/src/h2.c b/src/h2.c
index dd1f7d9b6..49a1252e9 100644
--- a/src/h2.c
+++ b/src/h2.c
@@ -62,7 +62,7 @@ static int has_forbidden_char(const struct ist ist, const 
char *start)
(1U << (uint8_t)*start) & ((1<<13) | (1<<10) | (1<<0)))
return 1;
start++;
-   } while (start < ist.ptr + ist.len);
+   } while (start < istend(ist));
return 0;
 }
 
diff --git a/src/hlua.c b/src/hlua.c
index e9d4391f7..086a8e0be 100644
--- a/src/hlua.c
+++ b/src/hlua.c
@@ -4750,7 +4750,7 @@ static int hlua_applet_http_new(lua_State *L, struct 
appctx *ctx)
char *p, *q, *end;
 
p = path.ptr;
-   end = path.ptr + path.len;
+   end = istend(path);
q = p;
while (q < end && *q != '?')
q++;
diff --git a/src/http_htx.c b/src/http_htx.c
index 8028cfc99..d93cc3797 100644
--- a/src/http_htx.c
+++ b/src/http_htx.c
@@ -195,7 +195,8 @@ static int __http_find_header(const struct htx *htx, const 
void *pattern, struct
if (istlen(n) < istlen(name))
goto next_blk;
 
-   n = ist2(istptr(n) + istlen(n) - istlen(name), 
istlen(name));
+   n = ist2(istend(n) - istlen(name),
+istlen(name));
if (!isteqi(n, name))
goto next_blk;
break;
@@ -219,8 +220,8 @@ static int __http_find_header(const struct htx *htx, const 
void *pattern, struct
ctx->lws_before++;
}
if (!(flags & HTTP_FIND_FL_FULL))
-   v.len = http_find_hdr_value_end(v.ptr, v.ptr + v.len) - 
v.ptr;
-   while (v.len && HTTP_IS_LWS(*(v.ptr + v.len - 1))) {
+   v.len = http_find_hdr_value_end(v.ptr, istend(v)) - 
v.ptr;
+   while (v.len && HTTP_IS_LWS(*(istend(v) - 1))) {
v.len--;
ctx->lws_after++;
}
@@ -710,7 +711,7 @@ int http_update_authority(struct htx *htx, struct htx_sl 
*sl, const struct ist h
 
chunk_memcat(temp, uri.ptr, authority.ptr - uri.ptr);
chunk_memcat(temp, host.ptr, host.len);
-   chunk_memcat(temp, authority.ptr + authority.len, uri.ptr + uri.len - 
(authority.ptr + authority.len));
+   chunk_memcat(temp, istend(authority), istend(uri) - istend(authority));
uri = ist2(temp->area + meth.len + vsn.len, host.len + uri.len - 
authority.len); /* uri */
 
return http_replace_stline(htx, meth, uri, vsn);
@@ -917,7 +918,7 @@ int http_str_to_htx(struct buffer *buf, struct ist raw, 
char **errmsg)
 
h1m_init_res();
h1m.flags |= H1_MF_NO_PHDR;
-   ret = h1_headers_to_hdr_list(raw.ptr, raw.ptr + raw.len,
+   ret = h1_headers_to_hdr_list(raw.ptr, istend(raw),
 hdrs, sizeof(hdrs)/sizeof(hdrs[0]), &h1m, 
&sl);
if (ret <= 0) {
memprintf(errmsg, "unabled to parse headers (error offset: 
%d)", h1m.err_pos);
diff --git a/src/htx.c b/src/htx.c
index 8c6e368e7..940989c50 100644
--- a/src/htx.c
+++ b/src/htx.c
@@ -601,11 +601,13 @@ struct htx_blk *htx_replace_blk_value(struct htx *htx, 
struct htx_blk *blk,
if (delta <= 0) {
/* compression: copy new data first then move the end */
memcpy(old.ptr, new.ptr, new.len);
-   memmove(old.ptr + new.len, old.ptr + old.len, (v.ptr + 
v.len) - (old.ptr + old.len));
+   memmove(old.ptr + new.len, istend(old),
+   istend(v) - istend(old));
}
else {
/* expansion: move the end 

[PATCH 2/4] DEV: coccinelle: Add rule to use `istend()` where possible

2021-11-06 Thread Tim Duesterhus
This replaces `i.ptr + i.len` by `istend()`.
---
 dev/coccinelle/ist.cocci | 7 +++
 1 file changed, 7 insertions(+)

diff --git a/dev/coccinelle/ist.cocci b/dev/coccinelle/ist.cocci
index 598ffa3e2..5b6aa6b2c 100644
--- a/dev/coccinelle/ist.cocci
+++ b/dev/coccinelle/ist.cocci
@@ -41,6 +41,13 @@ struct ist i;
 struct ist i;
 @@
 
+- (\(i.ptr\|istptr(i)\) + \(i.len\|istlen(i)\))
++ istend(i)
+
+@@
+struct ist i;
+@@
+
 - i.ptr != NULL
 + isttest(i)
 
-- 
2.33.1




[PATCH 2/2] CLEANUP: Apply ist.cocci

2021-11-04 Thread Tim Duesterhus
Make use of the new rules to use `istnext()`.
---
 src/cache.c| 24 
 src/http_htx.c | 12 
 src/mqtt.c |  2 +-
 3 files changed, 17 insertions(+), 21 deletions(-)

diff --git a/src/cache.c b/src/cache.c
index feab63f07..ba2b63c49 100644
--- a/src/cache.c
+++ b/src/cache.c
@@ -2180,49 +2180,49 @@ static int parse_encoding_value(struct ist encoding, 
unsigned int *encoding_valu
 
switch (*encoding.ptr) {
case 'a':
-   encoding = istadv(encoding, 1);
+   encoding = istnext(encoding);
*encoding_value = CHECK_ENCODING(encoding, "aes128gcm", 
VARY_ENCODING_AES128GCM);
break;
case 'b':
-   encoding = istadv(encoding, 1);
+   encoding = istnext(encoding);
*encoding_value = CHECK_ENCODING(encoding, "br", 
VARY_ENCODING_BR);
break;
case 'c':
-   encoding = istadv(encoding, 1);
+   encoding = istnext(encoding);
*encoding_value = CHECK_ENCODING(encoding, "compress", 
VARY_ENCODING_COMPRESS);
break;
case 'd':
-   encoding = istadv(encoding, 1);
+   encoding = istnext(encoding);
*encoding_value = CHECK_ENCODING(encoding, "deflate", 
VARY_ENCODING_DEFLATE);
break;
case 'e':
-   encoding = istadv(encoding, 1);
+   encoding = istnext(encoding);
*encoding_value = CHECK_ENCODING(encoding, "exi", 
VARY_ENCODING_EXI);
break;
case 'g':
-   encoding = istadv(encoding, 1);
+   encoding = istnext(encoding);
*encoding_value = CHECK_ENCODING(encoding, "gzip", 
VARY_ENCODING_GZIP);
break;
case 'i':
-   encoding = istadv(encoding, 1);
+   encoding = istnext(encoding);
*encoding_value = CHECK_ENCODING(encoding, "identity", 
VARY_ENCODING_IDENTITY);
break;
case 'p':
-   encoding = istadv(encoding, 1);
+   encoding = istnext(encoding);
*encoding_value = CHECK_ENCODING(encoding, "pack200-gzip", 
VARY_ENCODING_PACK200_GZIP);
break;
case 'x':
-   encoding = istadv(encoding, 1);
+   encoding = istnext(encoding);
*encoding_value = CHECK_ENCODING(encoding, "x-gzip", 
VARY_ENCODING_GZIP);
if (!*encoding_value)
*encoding_value = CHECK_ENCODING(encoding, 
"x-compress", VARY_ENCODING_COMPRESS);
break;
case 'z':
-   encoding = istadv(encoding, 1);
+   encoding = istnext(encoding);
*encoding_value = CHECK_ENCODING(encoding, "zstd", 
VARY_ENCODING_ZSTD);
break;
case '*':
-   encoding = istadv(encoding, 1);
+   encoding = istnext(encoding);
*encoding_value = VARY_ENCODING_STAR;
break;
default:
@@ -2238,7 +2238,7 @@ static int parse_encoding_value(struct ist encoding, 
unsigned int *encoding_valu
return -1;
 
if (has_null_weight) {
-   encoding = istadv(encoding, 1);
+   encoding = istnext(encoding);
 
encoding = http_trim_leading_spht(encoding);
 
diff --git a/src/http_htx.c b/src/http_htx.c
index bfdcaef86..8028cfc99 100644
--- a/src/http_htx.c
+++ b/src/http_htx.c
@@ -146,8 +146,7 @@ static int __http_find_header(const struct htx *htx, const 
void *pattern, struct
goto next_blk;
/* Skip comma */
if (*(v.ptr) == ',') {
-   v.ptr++;
-   v.len--;
+   v = istnext(v);
}
 
goto return_hdr;
@@ -216,8 +215,7 @@ static int __http_find_header(const struct htx *htx, const 
void *pattern, struct
ctx->lws_before = 0;
ctx->lws_after = 0;
while (v.len && HTTP_IS_LWS(*v.ptr)) {
-   v.ptr++;
-   v.len--;
+   v = istnext(v);
ctx->lws_before++;
}
if (!(flags & HTTP_FIND_FL_FULL))
@@ -457,16 +455,14 @@ int http_replace_req_query(struct htx *htx, const struct 
ist query)
uri = htx_sl_req_uri(sl);
q = uri;
while (q.len > 0 && *(q.ptr) != '?') {
-   q.ptr++;
-   q.len--;
+   q = istnext(q);
}
 
/* skip the question mark or indicate that we must insert it
 * (but only if the format string is not empty then).
 */
if (q.len) {
-   q.ptr++;
-   q.len--;
+   q = istnext(q);
}
else if 

[PATCH 1/2] DEV: coccinelle: Add rule to use `istnext()` where possible

2021-11-04 Thread Tim Duesterhus
This matches both `istadv(..., 1)` as well as raw `.ptr++` uses.
---
 dev/coccinelle/ist.cocci | 16 
 1 file changed, 16 insertions(+)

diff --git a/dev/coccinelle/ist.cocci b/dev/coccinelle/ist.cocci
index c3243302f..97ce0a2ad 100644
--- a/dev/coccinelle/ist.cocci
+++ b/dev/coccinelle/ist.cocci
@@ -26,6 +26,22 @@ expression e;
 struct ist i;
 @@
 
+- i = istadv(i, 1);
++ i = istnext(i);
+
+@@
+struct ist i;
+expression e;
+@@
+
+- i.ptr++;
+- i.len--;
++ i = istnext(i);
+
+@@
+struct ist i;
+@@
+
 - i.ptr != NULL
 + isttest(i)
 
-- 
2.33.1




[PATCH] REGTESTS: Use `feature cmd` for 2.5+ tests (2)

2021-11-04 Thread Tim Duesterhus
This patch effectively is identical to 7ba98480cc5b2ede0fd4cca162959f66beb82c82.
---
 reg-tests/connection/cli_src_dst.vtc| 3 +--
 reg-tests/http-messaging/http_transfer_encoding.vtc | 4 ++--
 reg-tests/http-messaging/srv_ws.vtc | 5 ++---
 reg-tests/http-rules/default_rules.vtc  | 3 +--
 reg-tests/startup/default_rules.vtc | 3 +--
 reg-tests/tcp-rules/default_rules.vtc   | 3 +--
 6 files changed, 8 insertions(+), 13 deletions(-)

diff --git a/reg-tests/connection/cli_src_dst.vtc 
b/reg-tests/connection/cli_src_dst.vtc
index cc0c94545..fa12bc805 100644
--- a/reg-tests/connection/cli_src_dst.vtc
+++ b/reg-tests/connection/cli_src_dst.vtc
@@ -1,7 +1,6 @@
 varnishtest "Test multi-level client source and destination addresses"
 
-#REQUIRE_VERSION=2.5
-
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
 feature ignore_unknown_macro
 
 haproxy h1 -conf {
diff --git a/reg-tests/http-messaging/http_transfer_encoding.vtc 
b/reg-tests/http-messaging/http_transfer_encoding.vtc
index 543e965fa..258b8a9e8 100644
--- a/reg-tests/http-messaging/http_transfer_encoding.vtc
+++ b/reg-tests/http-messaging/http_transfer_encoding.vtc
@@ -1,7 +1,7 @@
 varnishtest "A test to validate Transfer-Encoding header conformance to the 
spec"
-feature ignore_unknown_macro
 
-#REQUIRE_VERSION=2.5
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature ignore_unknown_macro
 
 server s1 {
 rxreq
diff --git a/reg-tests/http-messaging/srv_ws.vtc 
b/reg-tests/http-messaging/srv_ws.vtc
index bce12f6b1..32369a1a3 100644
--- a/reg-tests/http-messaging/srv_ws.vtc
+++ b/reg-tests/http-messaging/srv_ws.vtc
@@ -3,11 +3,10 @@
 
 varnishtest "h2 backend websocket management via server keyword"
 
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL)'"
 feature ignore_unknown_macro
 
-#REQUIRE_VERSION=2.5
-#REQUIRE_OPTION=OPENSSL
-
 # haproxy server
 haproxy hapsrv -conf {
defaults
diff --git a/reg-tests/http-rules/default_rules.vtc 
b/reg-tests/http-rules/default_rules.vtc
index a72776c07..3baa33a92 100644
--- a/reg-tests/http-rules/default_rules.vtc
+++ b/reg-tests/http-rules/default_rules.vtc
@@ -1,7 +1,6 @@
 varnishtest "Test declaration of HTTP rules in default sections"
 
-#REQUIRE_VERSION=2.5
-
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
 feature ignore_unknown_macro
 
 server s1 {
diff --git a/reg-tests/startup/default_rules.vtc 
b/reg-tests/startup/default_rules.vtc
index 4c8051312..cd86f7414 100644
--- a/reg-tests/startup/default_rules.vtc
+++ b/reg-tests/startup/default_rules.vtc
@@ -1,7 +1,6 @@
 varnishtest "Misuses of defaults section defining TCP/HTTP rules"
 
-#REQUIRE_VERSION=2.5
-
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
 feature ignore_unknown_macro
 
 #
diff --git a/reg-tests/tcp-rules/default_rules.vtc 
b/reg-tests/tcp-rules/default_rules.vtc
index 826a336cb..a2e8ce9ef 100644
--- a/reg-tests/tcp-rules/default_rules.vtc
+++ b/reg-tests/tcp-rules/default_rules.vtc
@@ -1,7 +1,6 @@
 varnishtest "Test declaration of TCP rules in default sections"
 
-#REQUIRE_VERSION=2.5
-
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
 feature ignore_unknown_macro
 
 server s1 {
-- 
2.33.1




[PATCH] CLEANUP: halog: Remove dead stores

2021-11-04 Thread Tim Duesterhus
Found using clang's scan-build.
---
 admin/halog/halog.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/admin/halog/halog.c b/admin/halog/halog.c
index 900cf5d46..f368c1c6f 100644
--- a/admin/halog/halog.c
+++ b/admin/halog/halog.c
@@ -551,7 +551,8 @@ int convert_date_to_timestamp(const char *field)
d = mo = y = h = m = s = 0;
e = field;
 
-   c = *(e++); // remove '['
+   e++; // remove '['
+
/* day + '/' */
while (1) {
c = *(e++) - '0';
@@ -1148,13 +1149,12 @@ int main(int argc, char **argv)
/* sort all timers */
for (f = 0; f < 5; f++) {
struct eb32_node *n;
-   int val;
 
-   val = 0;
n = eb32_first(&timers[f]);
while (n) {
int i;
double d;
+   int val;
 
t = container_of(n, struct timer, node);
last = n->key;
-- 
2.33.1




[PATCH 2/2] CLEANUP: Apply ha_free.cocci

2021-11-04 Thread Tim Duesterhus
Use `ha_free()` where possible.
---
 src/action.c   | 3 +--
 src/server.c   | 3 +--
 src/ssl_ckch.c | 6 ++
 3 files changed, 4 insertions(+), 8 deletions(-)

diff --git a/src/action.c b/src/action.c
index ba465a253..1de97692e 100644
--- a/src/action.c
+++ b/src/action.c
@@ -39,8 +39,7 @@ int check_action_rules(struct list *rules, struct proxy *px, 
int *err_code)
err++;
}
*err_code |= warnif_tcp_http_cond(px, rule->cond);
-   free(errmsg);
-   errmsg = NULL;
+   ha_free(&errmsg);
}
 
return err;
diff --git a/src/server.c b/src/server.c
index a0206021d..a8e85a982 100644
--- a/src/server.c
+++ b/src/server.c
@@ -2380,8 +2380,7 @@ struct server *srv_drop(struct server *srv)
 
EXTRA_COUNTERS_FREE(srv->extra_counters);
 
-   free(srv);
-   srv = NULL;
+   ha_free(&srv);
 
  end:
return next;
diff --git a/src/ssl_ckch.c b/src/ssl_ckch.c
index 2378ee349..eeb031b27 100644
--- a/src/ssl_ckch.c
+++ b/src/ssl_ckch.c
@@ -2506,8 +2506,7 @@ static int cli_parse_set_cafile(char **args, char 
*payload, struct appctx *appct
appctx->ctx.ssl.new_cafile_entry = NULL;
appctx->ctx.ssl.old_cafile_entry = NULL;
 
-   free(appctx->ctx.ssl.path);
-   appctx->ctx.ssl.path = NULL;
+   ha_free(&appctx->ctx.ssl.path);
 
	HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
	return cli_dynerr(appctx, memprintf(&err, "%sCan't update %s!\n", err ? err : "", args[3]));
@@ -3225,8 +3224,7 @@ static int cli_parse_set_crlfile(char **args, char 
*payload, struct appctx *appc
appctx->ctx.ssl.new_crlfile_entry = NULL;
appctx->ctx.ssl.old_crlfile_entry = NULL;
 
-   free(appctx->ctx.ssl.path);
-   appctx->ctx.ssl.path = NULL;
+   ha_free(&appctx->ctx.ssl.path);
 
	HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
	return cli_dynerr(appctx, memprintf(&err, "%sCan't update %s!\n", err ? err : "", args[3]));
-- 
2.33.1




[PATCH 1/2] DEV: coccinelle: Add ha_free.cocci

2021-11-04 Thread Tim Duesterhus
Taken from 61cfdf4fd8a93dc6fd9922d5b309a71bdc7d2853.
---
 dev/coccinelle/ha_free.cocci | 6 ++
 1 file changed, 6 insertions(+)
 create mode 100644 dev/coccinelle/ha_free.cocci

diff --git a/dev/coccinelle/ha_free.cocci b/dev/coccinelle/ha_free.cocci
new file mode 100644
index 0..00190393b
--- /dev/null
+++ b/dev/coccinelle/ha_free.cocci
@@ -0,0 +1,6 @@
+@ rule @
+expression E;
+@@
+- free(E);
+- E = NULL;
++ ha_free(&E);
-- 
2.33.1




[PATCH 1/2] MINOR: jwt: Make invalid static JWT algorithms an error in `jwt_verify` converter

2021-10-29 Thread Tim Duesterhus
It is not useful to start a configuration where an invalid static string is
provided as the JWT algorithm. Better make the administrator aware of the
suspected typo by failing to start.
---
 src/sample.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/sample.c b/src/sample.c
index 9200ca303..5abf4712a 100644
--- a/src/sample.c
+++ b/src/sample.c
@@ -3522,14 +3522,14 @@ static int sample_conv_jwt_verify_check(struct arg 
*args, struct sample_conv *co
 
switch(alg) {
case JWT_ALG_DEFAULT:
-   memprintf(err, "unknown JWT algorithm : %s", *err);
-   break;
+   memprintf(err, "unknown JWT algorithm: %s", 
args[0].data.str.area);
+   return 0;
 
case JWS_ALG_PS256:
case JWS_ALG_PS384:
case JWS_ALG_PS512:
memprintf(err, "RSASSA-PSS JWS signing not managed 
yet");
-   break;
+   return 0;
 
default:
break;
-- 
2.33.1




[PATCH 2/2] BUG/MINOR: jwt: Fix jwt_parse_alg incorrectly returning JWS_ALG_NONE

2021-10-29 Thread Tim Duesterhus
Hi Remi, Willy,

Is the length check at the start of `jwt_parse_alg()` actually useful? I would
expect that the vast majority of strings passed are valid algorithms that are
*not* `none`. Thus I expect this `if()` to almost never be `true`.

Should the `if()` be removed and a new `case 'n'` be added to the switch? Or
should an `unlikely()` be added around the condition?

Best regards
Tim Düsterhus

Apply with `git am --scissors` to automatically cut the commit message.

-- >8 --
`jwt_parse_alg()` previously incorrectly returned `JWS_ALG_NONE` for inputs
`""`, `"n"`, `"no"`, and `"non"` due to an incorrect check with `strncmp` that
also matches prefixes.

This bug did not affect the matching of the other known variants, because of
the special cased length check at the start of the function. Nonetheless these
variants are also affected and this bug might've been exposed during 
refactoring.

I did not see a small fix for `strncmp`, so I used the opportunity to migrate
this function to the ist API, which avoids the issue altogether. The overall
structure of this function was preserved.

A config like:

http-response set-header bearer %[str(),jwt_verify(,)]

Now correctly returns:

> [ALERT](109770) : config : parsing [./haproxy.cfg:6] : error detected in
> proxy 'test' while parsing 'http-response set-header' rule : failed to parse
> sample expression  : invalid args in converter
> 'jwt_verify' : unknown JWT algorithm: .

JWT support is new in 2.5, no backport needed.
---
 include/haproxy/jwt.h |  2 +-
 src/jwt.c | 34 +-
 src/sample.c  |  2 +-
 3 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/include/haproxy/jwt.h b/include/haproxy/jwt.h
index a343ffaf7..84421530d 100644
--- a/include/haproxy/jwt.h
+++ b/include/haproxy/jwt.h
@@ -26,7 +26,7 @@
 #include 
 
 #ifdef USE_OPENSSL
-enum jwt_alg jwt_parse_alg(const char *alg_str, unsigned int alg_len);
+enum jwt_alg jwt_parse_alg(struct ist);
 int jwt_tokenize(const struct buffer *jwt, struct jwt_item *items, unsigned 
int *item_num);
 int jwt_tree_load_cert(char *path, int pathlen, char **err);
 
diff --git a/src/jwt.c b/src/jwt.c
index 94bfa5adb..590b18c3b 100644
--- a/src/jwt.c
+++ b/src/jwt.c
@@ -28,49 +28,49 @@ static struct eb_root jwt_cert_tree = EB_ROOT_UNIQUE;
  * The possible algorithm strings that can be found in a JWS's JOSE header are
  * defined in section 3.1 of RFC7518.
  */
-enum jwt_alg jwt_parse_alg(const char *alg_str, unsigned int alg_len)
+enum jwt_alg jwt_parse_alg(struct ist str)
 {
enum jwt_alg alg = JWT_ALG_DEFAULT;
 
/* Algorithms are all 5 characters long apart from "none". */
-   if (alg_len < sizeof("HS256")-1) {
-   if (strncmp("none", alg_str, alg_len) == 0)
+   if (istlen(str) < sizeof("HS256")-1) {
+   if (isteq(str, ist("none")))
alg = JWS_ALG_NONE;
return alg;
}
 
if (alg == JWT_ALG_DEFAULT) {
-   switch(*alg_str++) {
+   switch(istshift(&str)) {
case 'H':
-   if (strncmp(alg_str, "S256", alg_len-1) == 0)
+   if (isteq(str, ist("S256")))
alg = JWS_ALG_HS256;
-   else if (strncmp(alg_str, "S384", alg_len-1) == 0)
+   else if (isteq(str, ist("S384")))
alg = JWS_ALG_HS384;
-   else if (strncmp(alg_str, "S512", alg_len-1) == 0)
+   else if (isteq(str, ist("S512")))
alg = JWS_ALG_HS512;
break;
case 'R':
-   if (strncmp(alg_str, "S256", alg_len-1) == 0)
+   if (isteq(str, ist("S256")))
alg = JWS_ALG_RS256;
-   else if (strncmp(alg_str, "S384", alg_len-1) == 0)
+   else if (isteq(str, ist("S384")))
alg = JWS_ALG_RS384;
-   else if (strncmp(alg_str, "S512", alg_len-1) == 0)
+   else if (isteq(str, ist("S512")))
alg = JWS_ALG_RS512;
break;
case 'E':
-   if (strncmp(alg_str, "S256", alg_len-1) == 0)
+   if (isteq(str, ist("S256")))
alg = JWS_ALG_ES256;
-   else if (strncmp(alg_str, "S384", alg_len-1) == 0)
+   else if (isteq(str, ist("S384")))
alg = JWS_ALG_ES384;
-   else if (strncmp(alg_str, "S512", alg_len-1) == 0)
+   else if (isteq(str, ist("S512")))
alg = JWS_ALG_ES512;
break;
case 'P':
-   if (strncmp(alg_str, "S256", alg_len-1) == 0)
+   

[PATCH] CLEANUP: hlua: Remove obsolete branch in `hlua_alloc()`

2021-10-23 Thread Tim Duesterhus
This branch is no longer required, because the `!nsize` case is handled for any
value of `ptr` now.

see 22586524e32f14c44239063088a38ccea8abc9b7
see a5efdff93c36f75345a2a18f18bffee9b602bc7b
---
 src/hlua.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/src/hlua.c b/src/hlua.c
index ac61a3171..0e12614af 100644
--- a/src/hlua.c
+++ b/src/hlua.c
@@ -11463,9 +11463,6 @@ static void *hlua_alloc(void *ud, void *ptr, size_t 
osize, size_t nsize)
struct hlua_mem_allocator *zone = ud;
size_t limit, old, new;
 
-   if (unlikely(!ptr && !nsize))
-   return NULL;
-
/* a limit of ~0 means unlimited and boot complete, so there's no need
 * for accounting anymore.
 */
-- 
2.33.1




[PATCH] DEV: coccinelle: Add realloc_leak.cocci

2021-10-23 Thread Tim Duesterhus
This coccinelle patch finds locations where the return value of `realloc()` is
assigned to the pointer passed to `realloc()`. This calls will leak memory if
`realloc()` returns `NULL`.
---
 dev/coccinelle/realloc_leak.cocci | 6 ++
 1 file changed, 6 insertions(+)
 create mode 100644 dev/coccinelle/realloc_leak.cocci

diff --git a/dev/coccinelle/realloc_leak.cocci 
b/dev/coccinelle/realloc_leak.cocci
new file mode 100644
index 0..c201b808c
--- /dev/null
+++ b/dev/coccinelle/realloc_leak.cocci
@@ -0,0 +1,6 @@
+@@
+expression E;
+expression F;
+@@
+
+* E = realloc(E, F);
-- 
2.33.1




[PATCH 2/2] CLEANUP: jwt: Remove the use of a trash buffer in jwt_jwsverify_rsa_ecdsa()

2021-10-18 Thread Tim Duesterhus
`trash` was completely unused within this function.
---
 src/jwt.c | 10 +-
 1 file changed, 1 insertion(+), 9 deletions(-)

diff --git a/src/jwt.c b/src/jwt.c
index d075bcfd4..94bfa5adb 100644
--- a/src/jwt.c
+++ b/src/jwt.c
@@ -214,14 +214,9 @@ jwt_jwsverify_rsa_ecdsa(const struct jwt_ctx *ctx, const 
struct buffer *decoded_
const EVP_MD *evp = NULL;
EVP_MD_CTX *evp_md_ctx;
enum jwt_vrfy_status retval = JWT_VRFY_KO;
-   struct buffer *trash = NULL;
struct ebmb_node *eb;
struct jwt_cert_tree_entry *entry = NULL;
 
-   trash = alloc_trash_chunk();
-   if (!trash)
-   return JWT_VRFY_OUT_OF_MEMORY;
-
switch(ctx->alg) {
case JWS_ALG_RS256:
case JWS_ALG_ES256:
@@ -239,10 +234,8 @@ jwt_jwsverify_rsa_ecdsa(const struct jwt_ctx *ctx, const 
struct buffer *decoded_
}
 
evp_md_ctx = EVP_MD_CTX_new();
-   if (!evp_md_ctx) {
-   free_trash_chunk(trash);
+   if (!evp_md_ctx)
return JWT_VRFY_OUT_OF_MEMORY;
-   }
 
eb = ebst_lookup(_cert_tree, ctx->key);
 
@@ -267,7 +260,6 @@ jwt_jwsverify_rsa_ecdsa(const struct jwt_ctx *ctx, const 
struct buffer *decoded_
 
 end:
EVP_MD_CTX_free(evp_md_ctx);
-   free_trash_chunk(trash);
return retval;
 }
 
-- 
2.33.0




[PATCH 1/2] CLEANUP: jwt: Remove the use of a trash buffer in jwt_jwsverify_hmac()

2021-10-18 Thread Tim Duesterhus
The OpenSSL documentation (https://www.openssl.org/docs/man1.1.0/man3/HMAC.html)
specifies:

> It places the result in md (which must have space for the output of the hash
> function, which is no more than EVP_MAX_MD_SIZE bytes). If md is NULL, the
> digest is placed in a static array. The size of the output is placed in
> md_len, unless it is NULL. Note: passing a NULL value for md to use the
> static array is not thread safe.

`EVP_MAX_MD_SIZE` appears to be defined as `64`, so let's simply use a stack
buffer to avoid the whole memory management.
---
 src/jwt.c | 12 +---
 1 file changed, 1 insertion(+), 11 deletions(-)

diff --git a/src/jwt.c b/src/jwt.c
index e29a1c797..d075bcfd4 100644
--- a/src/jwt.c
+++ b/src/jwt.c
@@ -175,19 +175,11 @@ static enum jwt_vrfy_status
 jwt_jwsverify_hmac(const struct jwt_ctx *ctx, const struct buffer 
*decoded_signature)
 {
const EVP_MD *evp = NULL;
-   unsigned char *signature = NULL;
+   unsigned char signature[EVP_MAX_MD_SIZE];
unsigned int signature_length = 0;
-   struct buffer *trash = NULL;
unsigned char *hmac_res = NULL;
enum jwt_vrfy_status retval = JWT_VRFY_KO;
 
-   trash = alloc_trash_chunk();
-   if (!trash)
-   return JWT_VRFY_OUT_OF_MEMORY;
-
-   signature = (unsigned char*)trash->area;
-   signature_length = trash->size;
-
switch(ctx->alg) {
case JWS_ALG_HS256:
evp = EVP_sha256();
@@ -208,8 +200,6 @@ jwt_jwsverify_hmac(const struct jwt_ctx *ctx, const struct 
buffer *decoded_signa
  (CRYPTO_memcmp(decoded_signature->area, signature, 
signature_length) == 0))
retval = JWT_VRFY_OK;
 
-   free_trash_chunk(trash);
-
return retval;
 }
 
-- 
2.33.0




[PATCH] CLEANUP: Consistently `unsigned int` for bitfields

2021-10-16 Thread Tim Duesterhus
see 6a0dd733906611dea958cf74b9f51bb16028ae20

Found using GitHub's CodeQL scan.
---
 include/haproxy/stick_table-t.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/haproxy/stick_table-t.h b/include/haproxy/stick_table-t.h
index 3b1f2b3ef..133f992b5 100644
--- a/include/haproxy/stick_table-t.h
+++ b/include/haproxy/stick_table-t.h
@@ -125,8 +125,8 @@ struct stktable_data_type {
const char *name; /* name of the data type */
int std_type; /* standard type we can use for this data, STD_T_* */
int arg_type; /* type of optional argument, ARG_T_* */
-   int is_array:1;   /* this is an array of gpc/gpt */
-   int is_local:1;   /* this is local only and never learned */
+   unsigned int is_array:1;   /* this is an array of gpc/gpt */
+   unsigned int is_local:1;   /* this is local only and never learned */
 };
 
 /* stick table keyword type */
-- 
2.33.0




[PATCH 1/2] CI: Add `permissions` to GitHub Actions

2021-10-16 Thread Tim Duesterhus
This change locks down the permissions of the access token in GitHub Actions to
only allow reading the repository contents and nothing else.

see 
https://docs.github.com/en/actions/security-guides/automatic-token-authentication#permissions-for-the-github_token
---
 .github/workflows/codespell.yml| 3 +++
 .github/workflows/compliance.yml   | 3 +++
 .github/workflows/contrib.yml  | 3 +++
 .github/workflows/coverity.yml | 3 +++
 .github/workflows/musl.yml | 3 +++
 .github/workflows/openssl-nodeprecated.yml | 3 +++
 .github/workflows/vtest.yml| 3 +++
 .github/workflows/windows.yml  | 3 +++
 8 files changed, 24 insertions(+)

diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
index de49f4343..61edaeb9e 100644
--- a/.github/workflows/codespell.yml
+++ b/.github/workflows/codespell.yml
@@ -4,6 +4,9 @@ on:
   schedule:
 - cron: "0 0 * * 2"
 
+permissions:
+  contents: read
+
 jobs:
   codespell:
 
diff --git a/.github/workflows/compliance.yml b/.github/workflows/compliance.yml
index 9f2bec289..fe6c2711e 100644
--- a/.github/workflows/compliance.yml
+++ b/.github/workflows/compliance.yml
@@ -5,6 +5,9 @@ on:
   schedule:
 - cron: "0 0 * * 3"
 
+permissions:
+  contents: read
+
 jobs:
   h2spec:
 name: h2spec
diff --git a/.github/workflows/contrib.yml b/.github/workflows/contrib.yml
index 53f6025ca..93387a458 100644
--- a/.github/workflows/contrib.yml
+++ b/.github/workflows/contrib.yml
@@ -3,6 +3,9 @@ name: Contrib
 on:
   push:
 
+permissions:
+  contents: read
+
 jobs:
   build:
 
diff --git a/.github/workflows/coverity.yml b/.github/workflows/coverity.yml
index fd5a0e2d2..b3dd5ec52 100644
--- a/.github/workflows/coverity.yml
+++ b/.github/workflows/coverity.yml
@@ -9,6 +9,9 @@ on:
   schedule:
   - cron: "0 0 * * *"
 
+permissions:
+  contents: read
+
 jobs:
   scan:
 runs-on: ubuntu-latest
diff --git a/.github/workflows/musl.yml b/.github/workflows/musl.yml
index 8f6922486..19d82af7c 100644
--- a/.github/workflows/musl.yml
+++ b/.github/workflows/musl.yml
@@ -2,6 +2,9 @@ name: alpine/musl
 
 on: [push]
 
+permissions:
+  contents: read
+
 jobs:
   musl:
   name: gcc
diff --git a/.github/workflows/openssl-nodeprecated.yml 
b/.github/workflows/openssl-nodeprecated.yml
index 6833911e4..f6da38234 100644
--- a/.github/workflows/openssl-nodeprecated.yml
+++ b/.github/workflows/openssl-nodeprecated.yml
@@ -14,6 +14,9 @@ on:
   schedule:
 - cron: "0 0 * * 4"
 
+permissions:
+  contents: read
+
 jobs:
   test:
 
diff --git a/.github/workflows/vtest.yml b/.github/workflows/vtest.yml
index 1dc216eeb..4cdbdce5b 100644
--- a/.github/workflows/vtest.yml
+++ b/.github/workflows/vtest.yml
@@ -11,6 +11,9 @@ name: VTest
 on:
   push:
 
+permissions:
+  contents: read
+
 jobs:
   # The generate-matrix job generates the build matrix using JSON output
   # generated by .github/matrix.py.
diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
index b5a198aff..42bb4e8c9 100644
--- a/.github/workflows/windows.yml
+++ b/.github/workflows/windows.yml
@@ -11,6 +11,9 @@ name: Windows
 on:
   push:
 
+permissions:
+  contents: read
+
 jobs:
   msys2:
 name: ${{ matrix.name }}
-- 
2.33.0




  1   2   3   4   5   >