This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 6736f02f279 [enhancement](cloud) add bvar to monitor s3 throughput&QPS (#34087)
6736f02f279 is described below

commit 6736f02f279a32d5776f3b04b9516cd5b58e76d9
Author: zhengyu <[email protected]>
AuthorDate: Thu Apr 25 21:14:06 2024 +0800

    [enhancement](cloud) add bvar to monitor s3 throughput&QPS (#34087)
    
    Signed-off-by: freemandealer <[email protected]>
---
 be/src/io/fs/hdfs_file_reader.cpp | 13 +++++++++++++
 be/src/io/fs/s3_file_reader.cpp   |  5 +++++
 2 files changed, 18 insertions(+)

diff --git a/be/src/io/fs/hdfs_file_reader.cpp b/be/src/io/fs/hdfs_file_reader.cpp
index 358663b65d0..a26448c90e2 100644
--- a/be/src/io/fs/hdfs_file_reader.cpp
+++ b/be/src/io/fs/hdfs_file_reader.cpp
@@ -24,6 +24,8 @@
 #include <ostream>
 #include <utility>
 
+#include "bvar/latency_recorder.h"
+#include "bvar/reducer.h"
 #include "common/compiler_util.h" // IWYU pragma: keep
 #include "common/logging.h"
 #include "common/sync_point.h"
@@ -33,6 +35,13 @@
 #include "util/doris_metrics.h"
 
 namespace doris::io {
+
+bvar::Adder<uint64_t> hdfs_bytes_read_total("hdfs_file_reader", "bytes_read");
+bvar::LatencyRecorder hdfs_bytes_per_read("hdfs_file_reader", "bytes_per_read"); // also QPS
+bvar::PerSecond<bvar::Adder<uint64_t>> hdfs_read_througthput("hdfs_file_reader",
+                                                             "hdfs_read_throughput",
+                                                             &hdfs_bytes_read_total);
+
 namespace {
 
 Result<FileHandleCache::Accessor> get_file(const hdfsFS& fs, const Path& file, int64_t mtime,
@@ -148,6 +157,8 @@ Status HdfsFileReader::read_at_impl(size_t offset, Slice result, size_t* bytes_r
         has_read += loop_read;
     }
     *bytes_read = has_read;
+    hdfs_bytes_read_total << *bytes_read;
+    hdfs_bytes_per_read << *bytes_read;
     return Status::OK();
 }
 
@@ -206,6 +217,8 @@ Status HdfsFileReader::read_at_impl(size_t offset, Slice result, size_t* bytes_r
         has_read += loop_read;
     }
     *bytes_read = has_read;
+    hdfs_bytes_read_total << *bytes_read;
+    hdfs_bytes_per_read << *bytes_read;
     return Status::OK();
 }
 #endif
diff --git a/be/src/io/fs/s3_file_reader.cpp b/be/src/io/fs/s3_file_reader.cpp
index 2bd40fbbf43..68acbf47eb1 100644
--- a/be/src/io/fs/s3_file_reader.cpp
+++ b/be/src/io/fs/s3_file_reader.cpp
@@ -23,6 +23,7 @@
 #include <aws/s3/S3Errors.h>
 #include <aws/s3/model/GetObjectRequest.h>
 #include <aws/s3/model/GetObjectResult.h>
+#include <bvar/latency_recorder.h>
 #include <bvar/reducer.h>
 #include <fmt/format.h>
 #include <glog/logging.h>
@@ -43,6 +44,9 @@ bvar::Adder<uint64_t> s3_file_reader_read_counter("s3_file_reader", "read_at");
 bvar::Adder<uint64_t> s3_file_reader_total("s3_file_reader", "total_num");
 bvar::Adder<uint64_t> s3_bytes_read_total("s3_file_reader", "bytes_read");
 bvar::Adder<uint64_t> s3_file_being_read("s3_file_reader", "file_being_read");
+bvar::LatencyRecorder s3_bytes_per_read("s3_file_reader", "bytes_per_read"); // also QPS
+bvar::PerSecond<bvar::Adder<uint64_t>> s3_read_througthput("s3_file_reader", "s3_read_throughput",
+                                                           &s3_bytes_read_total);
 
 Result<FileReaderSPtr> S3FileReader::create(std::shared_ptr<const S3ClientHolder> client,
                                             std::string bucket, std::string key,
@@ -125,6 +129,7 @@ Status S3FileReader::read_at_impl(size_t offset, Slice result, size_t* bytes_rea
                                      _path.native(), *bytes_read, bytes_req);
     }
     s3_bytes_read_total << *bytes_read;
+    s3_bytes_per_read << *bytes_read;
     s3_file_reader_read_counter << 1;
     DorisMetrics::instance()->s3_bytes_read_total->increment(*bytes_read);
     return Status::OK();


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to