This is an automated email from the ASF dual-hosted git repository.
bcall pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/trafficserver.git
The following commit(s) were added to refs/heads/master by this push:
new bd851ac982 Fix potential buffer overflow in http_load snprintf calls (#12723)
bd851ac982 is described below
commit bd851ac9829a45edebebeeb5f688b66d8a8789f0
Author: Bryan Call <[email protected]>
AuthorDate: Wed Dec 10 09:40:04 2025 -0800
Fix potential buffer overflow in http_load snprintf calls (#12723)
The snprintf function returns the number of characters that would have
been written had the buffer been large enough, not the number actually
written. This could cause hdr_bytes and req_bytes to be incremented beyond
the buffer size, leading to out-of-bounds writes in subsequent snprintf
calls.
Fixed by:
- Checking snprintf return value before incrementing offset
- Only incrementing if return value is positive and less than remaining
space
- Applied to all snprintf calls that use += pattern in read_url_file()
- Using braces for all conditional statements
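
For reference, a minimal standalone sketch of the guarded-append pattern the
fix applies (the append_hdr helper and the sample header values below are
illustrative, not part of http_load):

    #include <stdio.h>

    /* Append one formatted header to buf; advance *off only when the output
     * actually fit, so a truncated write can never push the offset past the
     * end of buf and corrupt memory on the next snprintf call. */
    static void
    append_hdr(char *buf, size_t bufsize, int *off, const char *fmt, const char *val)
    {
      int n = snprintf(&buf[*off], bufsize - *off, fmt, val);
      if (n > 0 && (size_t)n < bufsize - *off) {
        *off += n;
      }
    }

    int
    main(void)
    {
      char hdr_buf[2048];
      int  hdr_bytes = 0;

      append_hdr(hdr_buf, sizeof(hdr_buf), &hdr_bytes, "User-Agent: %s\r\n", "example-agent");
      append_hdr(hdr_buf, sizeof(hdr_buf), &hdr_bytes, "Cookie: %s\r\n", "id=42");
      snprintf(&hdr_buf[hdr_bytes], sizeof(hdr_buf) - hdr_bytes, "\r\n");

      fputs(hdr_buf, stdout);
      return 0;
    }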
---
tools/http_load/http_load.c | 79 ++++++++++++++++++++++++++++++++++-----------
1 file changed, 60 insertions(+), 19 deletions(-)
diff --git a/tools/http_load/http_load.c b/tools/http_load/http_load.c
index 4db070727e..e274398085 100644
--- a/tools/http_load/http_load.c
+++ b/tools/http_load/http_load.c
@@ -1,6 +1,6 @@
/* http_load - multiprocessing http test client
**
-** Copyright © 1998,1999,2001 by Jef Poskanzer <[email protected]>.
+** Copyright © 1998,1999,2001 by Jef Poskanzer <[email protected]>.
** All rights reserved.
**
** Redistribution and use in source and binary forms, with or without
@@ -648,18 +648,43 @@ read_url_file(char *url_file)
constructed by the URL host and possibly port (if not port 80) */
char hdr_buf[2048];
- int hdr_bytes = 0;
- hdr_bytes += snprintf(&hdr_buf[hdr_bytes], sizeof(hdr_buf) - hdr_bytes, "User-Agent: %s\r\n", user_agent);
- if (cookie)
- hdr_bytes += snprintf(&hdr_buf[hdr_bytes], sizeof(hdr_buf) - hdr_bytes, "Cookie: %s\r\n", cookie);
- if (do_accept_gzip)
- hdr_bytes += snprintf(&hdr_buf[hdr_bytes], sizeof(hdr_buf) - hdr_bytes, "Accept-Encoding: gzip\r\n");
+ int hdr_bytes = 0;
+ int n;
+
+ n = snprintf(&hdr_buf[hdr_bytes], sizeof(hdr_buf) - hdr_bytes, "User-Agent: %s\r\n", user_agent);
+ if (n > 0 && (size_t)n < sizeof(hdr_buf) - hdr_bytes) {
+ hdr_bytes += n;
+ }
+
+ if (cookie) {
+ n = snprintf(&hdr_buf[hdr_bytes], sizeof(hdr_buf) - hdr_bytes, "Cookie: %s\r\n", cookie);
+ if (n > 0 && (size_t)n < sizeof(hdr_buf) - hdr_bytes) {
+ hdr_bytes += n;
+ }
+ }
+
+ if (do_accept_gzip) {
+ n = snprintf(&hdr_buf[hdr_bytes], sizeof(hdr_buf) - hdr_bytes, "Accept-Encoding: gzip\r\n");
+ if (n > 0 && (size_t)n < sizeof(hdr_buf) - hdr_bytes) {
+ hdr_bytes += n;
+ }
+ }
+
/* Add Connection: keep-alive header if keep_alive requested, and version != "1.1" */
- if ((keep_alive > 0) && !is_http_1_1)
- hdr_bytes += snprintf(&hdr_buf[hdr_bytes], sizeof(hdr_buf) - hdr_bytes, "Connection: keep-alive\r\n");
+ if ((keep_alive > 0) && !is_http_1_1) {
+ n = snprintf(&hdr_buf[hdr_bytes], sizeof(hdr_buf) - hdr_bytes, "Connection: keep-alive\r\n");
+ if (n > 0 && (size_t)n < sizeof(hdr_buf) - hdr_bytes) {
+ hdr_bytes += n;
+ }
+ }
+
if (extra_headers != NULL) {
- hdr_bytes += snprintf(&hdr_buf[hdr_bytes], sizeof(hdr_buf) - hdr_bytes, "%s\r\n", extra_headers);
+ n = snprintf(&hdr_buf[hdr_bytes], sizeof(hdr_buf) - hdr_bytes, "%s\r\n", extra_headers);
+ if (n > 0 && (size_t)n < sizeof(hdr_buf) - hdr_bytes) {
+ hdr_bytes += n;
+ }
}
+
snprintf(&hdr_buf[hdr_bytes], sizeof(hdr_buf) - hdr_bytes, "\r\n");
while (fgets(line, sizeof(line), fp) != (char *)0) {
@@ -725,20 +750,36 @@ read_url_file(char *url_file)
req_bytes = snprintf(req_buf, sizeof(req_buf), "GET %.500s HTTP/%s\r\n", urls[num_urls].filename, http_version);
if (extra_headers == NULL || !strstr(extra_headers, "Host:")) {
- if (urls[num_urls].port != 80)
- req_bytes += snprintf(&req_buf[req_bytes], sizeof(req_buf) - req_bytes, "Host: %s:%d\r\n", urls[num_urls].hostname,
- urls[num_urls].port);
- else
- req_bytes += snprintf(&req_buf[req_bytes], sizeof(req_buf) - req_bytes, "Host: %s\r\n", urls[num_urls].hostname);
+ if (urls[num_urls].port != 80) {
+ n = snprintf(&req_buf[req_bytes], sizeof(req_buf) - req_bytes, "Host: %s:%d\r\n", urls[num_urls].hostname,
+ urls[num_urls].port);
+ if (n > 0 && (size_t)n < sizeof(req_buf) - req_bytes) {
+ req_bytes += n;
+ }
+ } else {
+ n = snprintf(&req_buf[req_bytes], sizeof(req_buf) - req_bytes, "Host: %s\r\n", urls[num_urls].hostname);
+ if (n > 0 && (size_t)n < sizeof(req_buf) - req_bytes) {
+ req_bytes += n;
+ }
+ }
}
if (unique_id == 1) {
- req_bytes += snprintf(&req_buf[req_bytes], sizeof(req_buf) - req_bytes, "X-ID: ");
- urls[num_urls].unique_id_offset = req_bytes;
- req_bytes += snprintf(&req_buf[req_bytes], sizeof(req_buf) - req_bytes, "%09u\r\n", 0);
+ n = snprintf(&req_buf[req_bytes], sizeof(req_buf) - req_bytes, "X-ID: ");
+ if (n > 0 && (size_t)n < sizeof(req_buf) - req_bytes) {
+ req_bytes += n;
+ urls[num_urls].unique_id_offset = req_bytes;
+ n = snprintf(&req_buf[req_bytes], sizeof(req_buf) - req_bytes, "%09u\r\n", 0);
+ if (n > 0 && (size_t)n < sizeof(req_buf) - req_bytes) {
+ req_bytes += n;
+ }
+ }
}
// add the common hdr here
- req_bytes += snprintf(&req_buf[req_bytes], sizeof(req_buf) - req_bytes, hdr_buf, 0);
+ n = snprintf(&req_buf[req_bytes], sizeof(req_buf) - req_bytes, hdr_buf, 0);
+ if (n > 0 && (size_t)n < sizeof(req_buf) - req_bytes) {
+ req_bytes += n;
+ }
urls[num_urls].buf_bytes = req_bytes;
urls[num_urls].buf = strdup_check(req_buf);