This is an automated email from the ASF dual-hosted git repository.
yiguolei pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/branch-2.1 by this push:
new b7ded71bfb9 [fix](stream-load) catch exception when parsing CONTENT_LENGTH (#40796) (#40988)
b7ded71bfb9 is described below
commit b7ded71bfb96324b17b88978608634e9f8ec7005
Author: Kaijie Chen <[email protected]>
AuthorDate: Thu Sep 19 22:15:08 2024 +0800
[fix](stream-load) catch exception when parsing CONTENT_LENGTH (#40796) (#40988)
backport #40796
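For context: std::stol throws std::invalid_argument when the string is not
numeric and std::out_of_range when the value overflows a long, so before this
fix a malformed client-supplied Content-Length header could escape the BE
HTTP handlers as an uncaught exception. A minimal standalone sketch of that
behavior (illustrative only, not part of this commit; the sample header
values are made up):

#include <exception>
#include <iostream>
#include <string>

int main() {
    // Made-up header values: the first is non-numeric, the second
    // overflows a long.
    for (const std::string& header : {"abc", "99999999999999999999999999"}) {
        try {
            // Unguarded parse, as in the code before this patch.
            long body_bytes = std::stol(header);
            std::cout << "parsed CONTENT_LENGTH=" << body_bytes << "\n";
        } catch (const std::exception& e) {
            // The patch turns this into Status::InvalidArgument instead
            // of letting the exception propagate.
            std::cout << "invalid CONTENT_LENGTH=" << header << ": "
                      << e.what() << "\n";
        }
    }
    return 0;
}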
---
be/src/http/action/http_stream.cpp | 15 +++++++++++++--
be/src/http/action/stream_load.cpp | 15 +++++++++++++--
2 files changed, 26 insertions(+), 4 deletions(-)
diff --git a/be/src/http/action/http_stream.cpp b/be/src/http/action/http_stream.cpp
index 83ce0ce82cc..6a648c93150 100644
--- a/be/src/http/action/http_stream.cpp
+++ b/be/src/http/action/http_stream.cpp
@@ -197,7 +197,12 @@ Status HttpStreamAction::_on_header(HttpRequest* http_req, std::shared_ptr<Strea
     ctx->body_bytes = 0;
     size_t csv_max_body_bytes = config::streaming_load_max_mb * 1024 * 1024;
     if (!http_req->header(HttpHeaders::CONTENT_LENGTH).empty()) {
-        ctx->body_bytes = std::stol(http_req->header(HttpHeaders::CONTENT_LENGTH));
+        try {
+            ctx->body_bytes = std::stol(http_req->header(HttpHeaders::CONTENT_LENGTH));
+        } catch (const std::exception& e) {
+            return Status::InvalidArgument("invalid HTTP header CONTENT_LENGTH={}: {}",
+                                           http_req->header(HttpHeaders::CONTENT_LENGTH), e.what());
+        }
         // csv max body size
         if (ctx->body_bytes > csv_max_body_bytes) {
             LOG(WARNING) << "body exceed max size." << ctx->brief();
@@ -349,7 +354,13 @@ Status HttpStreamAction::process_put(HttpRequest* http_req,
     // FIXME find a way to avoid chunked stream load write large WALs
     size_t content_length = 0;
     if (!http_req->header(HttpHeaders::CONTENT_LENGTH).empty()) {
-        content_length = std::stol(http_req->header(HttpHeaders::CONTENT_LENGTH));
+        try {
+            content_length = std::stol(http_req->header(HttpHeaders::CONTENT_LENGTH));
+        } catch (const std::exception& e) {
+            return Status::InvalidArgument("invalid HTTP header CONTENT_LENGTH={}: {}",
+                                           http_req->header(HttpHeaders::CONTENT_LENGTH),
+                                           e.what());
+        }
         if (ctx->format == TFileFormatType::FORMAT_CSV_GZ ||
             ctx->format == TFileFormatType::FORMAT_CSV_LZO ||
             ctx->format == TFileFormatType::FORMAT_CSV_BZ2 ||
diff --git a/be/src/http/action/stream_load.cpp b/be/src/http/action/stream_load.cpp
index 2036043b4d4..bcc55ae4bfd 100644
--- a/be/src/http/action/stream_load.cpp
+++ b/be/src/http/action/stream_load.cpp
@@ -265,7 +265,12 @@ Status StreamLoadAction::_on_header(HttpRequest* http_req, std::shared_ptr<Strea
         }
     }
     if (!http_req->header(HttpHeaders::CONTENT_LENGTH).empty()) {
-        ctx->body_bytes = std::stol(http_req->header(HttpHeaders::CONTENT_LENGTH));
+        try {
+            ctx->body_bytes = std::stol(http_req->header(HttpHeaders::CONTENT_LENGTH));
+        } catch (const std::exception& e) {
+            return Status::InvalidArgument("invalid HTTP header CONTENT_LENGTH={}: {}",
+                                           http_req->header(HttpHeaders::CONTENT_LENGTH), e.what());
+        }
         // json max body size
         if ((ctx->format == TFileFormatType::FORMAT_JSON) &&
             (ctx->body_bytes > json_max_body_bytes) && !read_json_by_line) {
@@ -663,7 +668,13 @@ Status StreamLoadAction::_process_put(HttpRequest* http_req,
     // FIXME find a way to avoid chunked stream load write large WALs
     size_t content_length = 0;
     if (!http_req->header(HttpHeaders::CONTENT_LENGTH).empty()) {
-        content_length = std::stol(http_req->header(HttpHeaders::CONTENT_LENGTH));
+        try {
+            content_length = std::stol(http_req->header(HttpHeaders::CONTENT_LENGTH));
+        } catch (const std::exception& e) {
+            return Status::InvalidArgument("invalid HTTP header CONTENT_LENGTH={}: {}",
+                                           http_req->header(HttpHeaders::CONTENT_LENGTH),
+                                           e.what());
+        }
         if (ctx->format == TFileFormatType::FORMAT_CSV_GZ ||
             ctx->format == TFileFormatType::FORMAT_CSV_LZO ||
             ctx->format == TFileFormatType::FORMAT_CSV_BZ2 ||
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]