This is an automated email from the ASF dual-hosted git repository.

hellostephen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 3a0cc6a5acf [fix](cloud)Not log stack information by Peer read (#57816)
3a0cc6a5acf is described below

commit 3a0cc6a5acf5167517786805d8a5106b7b106680
Author: deardeng <[email protected]>
AuthorDate: Mon Nov 10 15:05:57 2025 +0800

    [fix](cloud)Not log stack information by Peer read (#57816)
    
    ### What problem does this PR solve?
    
    Do not print stack trace information on these error paths:
    1. Fix: excessive stack traces in the log consume too much log disk space.
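    
    For illustration, a minimal sketch of the pattern this change leans on, assuming
    an error factory whose boolean template parameter controls stack-trace capture
    (the `Status` below is illustrative only, not Doris's actual Status class):
    
    ```cpp
    // Sketch only: a Status-like error factory where a <false> template argument
    // skips stack-trace capture, mirroring the InternalError<false>(...) calls
    // introduced in this diff for hot/expected peer-read error paths.
    #include <iostream>
    #include <string>
    
    struct Status {
        std::string msg;
    
        // stacktrace defaults to true; hot error paths opt out with <false>.
        template <bool stacktrace = true, typename... Args>
        static Status InternalError(const std::string& fmt, Args&&... /*args*/) {
            Status st;
            st.msg = fmt;  // a real implementation would format fmt with the args
            if constexpr (stacktrace) {
                st.msg += "\n<stack trace would be captured here>";
            }
            return st;
        }
    };
    
    int main() {
        // Default: message plus (simulated) stack trace.
        std::cout << Status::InternalError("host or port is empty").msg << "\n\n";
        // <false>: message only, which is what saves log disk space.
        std::cout << Status::InternalError<false>("host or port is empty").msg << "\n";
    }
    ```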
---
 be/src/cloud/cloud_internal_service.cpp       |  4 ++--
 be/src/io/cache/cached_remote_file_reader.cpp |  2 +-
 be/src/io/cache/peer_file_cache_reader.cpp    | 13 +++++++------
 3 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/be/src/cloud/cloud_internal_service.cpp b/be/src/cloud/cloud_internal_service.cpp
index 957a3febc07..5363fc7479a 100644
--- a/be/src/cloud/cloud_internal_service.cpp
+++ b/be/src/cloud/cloud_internal_service.cpp
@@ -223,7 +223,7 @@ Status handle_peer_file_cache_block_request(const PFetchPeerDataRequest* request
     if (cache == nullptr) {
         g_file_cache_get_by_peer_failed_num << 1;
         set_error_response(response, "can't get file cache instance");
-        return Status::InternalError("can't get file cache instance");
+        return Status::InternalError<false>("can't get file cache instance");
     }
 
     io::CacheContext ctx {};
@@ -240,7 +240,7 @@ Status handle_peer_file_cache_block_request(const PFetchPeerDataRequest* request
                 g_file_cache_get_by_peer_failed_num << 1;
                 LOG(WARNING) << "read cache block failed, state=" << 
fb->state();
                 set_error_response(response, "read cache file error");
-                return Status::InternalError("cache block not downloaded");
+                return Status::InternalError<false>("cache block not 
downloaded");
             }
 
             g_file_cache_get_by_peer_blocks_num << 1;
diff --git a/be/src/io/cache/cached_remote_file_reader.cpp b/be/src/io/cache/cached_remote_file_reader.cpp
index 60d14b841e1..cce96adb3e5 100644
--- a/be/src/io/cache/cached_remote_file_reader.cpp
+++ b/be/src/io/cache/cached_remote_file_reader.cpp
@@ -205,7 +205,7 @@ Status execute_peer_read(const std::vector<FileBlockSPtr>& empty_blocks, size_t
         LOG_EVERY_N(WARNING, 100) << "PeerFileCacheReader host or port is empty"
                                   << ", host=" << host << ", port=" << port
                                   << ", file_path=" << file_path;
-        return Status::InternalError("host or port is empty");
+        return Status::InternalError<false>("host or port is empty");
     }
     SCOPED_RAW_TIMER(&stats.peer_read_timer);
     peer_read_counter << 1;
diff --git a/be/src/io/cache/peer_file_cache_reader.cpp b/be/src/io/cache/peer_file_cache_reader.cpp
index c034cdce110..a3ba09cb2b4 100644
--- a/be/src/io/cache/peer_file_cache_reader.cpp
+++ b/be/src/io/cache/peer_file_cache_reader.cpp
@@ -74,7 +74,7 @@ Status PeerFileCacheReader::fetch_blocks(const std::vector<FileBlockSPtr>& block
         return Status::OK();
     }
     if (!_is_doris_table) {
-        return Status::NotSupported("peer cache fetch only supports doris 
table segments");
+        return Status::NotSupported<false>("peer cache fetch only supports 
doris table segments");
     }
 
     PFetchPeerDataRequest req;
@@ -97,7 +97,7 @@ Status PeerFileCacheReader::fetch_blocks(const std::vector<FileBlockSPtr>& block
         if (!status.ok()) {
             peer_cache_reader_failed_counter << 1;
             LOG(WARNING) << "failed to get ip from host " << _host << ": " << 
status.to_string();
-            return Status::InternalError("failed to get ip from host {}", 
_host);
+            return Status::InternalError<false>("failed to get ip from host 
{}", _host);
         }
     }
     std::string brpc_addr = get_host_port(realhost, port);
@@ -108,7 +108,7 @@ Status PeerFileCacheReader::fetch_blocks(const std::vector<FileBlockSPtr>& block
     if (!brpc_stub) {
         peer_cache_reader_failed_counter << 1;
         LOG(WARNING) << "failed to get brpc stub " << brpc_addr;
-        st = Status::RpcError("Address {} is wrong", brpc_addr);
+        st = Status::RpcError<false>("Address {} is wrong", brpc_addr);
         return st;
     }
     LIMIT_REMOTE_SCAN_IO(bytes_read);
@@ -128,7 +128,7 @@ Status PeerFileCacheReader::fetch_blocks(const std::vector<FileBlockSPtr>& block
     peer_cache_reader_read_counter << 1;
     brpc_stub->fetch_peer_data(&cntl, &req, &resp, nullptr);
     if (cntl.Failed()) {
-        return Status::RpcError(cntl.ErrorText());
+        return Status::RpcError<false>(cntl.ErrorText());
     }
     if (resp.has_status()) {
         Status st2 = Status::create(resp.status());
@@ -140,7 +140,7 @@ Status PeerFileCacheReader::fetch_blocks(const std::vector<FileBlockSPtr>& block
         if (data.data().empty()) {
             peer_cache_reader_failed_counter << 1;
             LOG(WARNING) << "peer cache read empty data" << 
data.block_offset();
-            return Status::InternalError("peer cache read empty data");
+            return Status::InternalError<false>("peer cache read empty data");
         }
         int64_t block_off = data.block_offset();
         size_t rel = block_off > static_cast<int64_t>(off)
@@ -158,7 +158,8 @@ Status PeerFileCacheReader::fetch_blocks(const std::vector<FileBlockSPtr>& block
     peer_bytes_per_read << filled;
     if (filled != s.size) {
         peer_cache_reader_failed_counter << 1;
-        return Status::InternalError("peer cache read incomplete: need={}, 
got={}", s.size, filled);
+        return Status::InternalError<false>("peer cache read incomplete: 
need={}, got={}", s.size,
+                                            filled);
     }
     peer_cache_reader_succ_counter << 1;
     return Status::OK();


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
