This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 5202e8ad55f  [fix](tracing) Fix wrong global variable init order of tracing log dir (#34454)
5202e8ad55f is described below

commit 5202e8ad55fe74da57fda33264b698893ad4d192
Author: zclllyybb <[email protected]>
AuthorDate: Wed May 8 10:45:28 2024 +0800

     [fix](tracing) Fix wrong global variable init order of tracing log dir (#34454)
---
 be/src/pipeline/pipeline_tracing.cpp | 6 ++----
 be/src/pipeline/pipeline_tracing.h   | 3 +++
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/be/src/pipeline/pipeline_tracing.cpp b/be/src/pipeline/pipeline_tracing.cpp
index abedc4ff4f4..047e3c3a01d 100644
--- a/be/src/pipeline/pipeline_tracing.cpp
+++ b/be/src/pipeline/pipeline_tracing.cpp
@@ -35,8 +35,6 @@
 
 namespace doris::pipeline {
 
-std::filesystem::path log_dir = fmt::format("{}/pipe_tracing", getenv("LOG_DIR"));
-
 void PipelineTracerContext::record(ScheduleRecord record) {
     if (_dump_type == RecordType::None) [[unlikely]] {
         return;
@@ -97,7 +95,7 @@ Status PipelineTracerContext::change_record_params(
 void PipelineTracerContext::_dump_query(TUniqueId query_id) {
     //TODO: when dump, now could append records but can't add new query. try use better grained locks.
     std::unique_lock<std::mutex> l(_data_lock); // can't rehash
-    auto path = log_dir / fmt::format("query{}", to_string(query_id));
+    auto path = _log_dir / fmt::format("query{}", to_string(query_id));
     int fd = ::open(path.c_str(), O_CREAT | O_WRONLY | O_TRUNC,
                     S_ISGID | S_ISUID | S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP | S_IWOTH | S_IROTH);
     if (fd < 0) [[unlikely]] {
@@ -134,7 +132,7 @@ void PipelineTracerContext::_dump_timeslice() {
     std::unique_lock<std::mutex> l(_data_lock); // can't rehash
 
     //TODO: if long time, per timeslice per file
-    auto path = log_dir /
+    auto path = _log_dir /
                 fmt::format("until{}", std::chrono::steady_clock::now().time_since_epoch().count());
     int fd = ::open(path.c_str(), O_CREAT | O_WRONLY | O_TRUNC,
                     S_ISGID | S_ISUID | S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP | S_IWOTH | S_IROTH);
diff --git a/be/src/pipeline/pipeline_tracing.h b/be/src/pipeline/pipeline_tracing.h
index eb0f2ac684a..27dda6b85f1 100644
--- a/be/src/pipeline/pipeline_tracing.h
+++ b/be/src/pipeline/pipeline_tracing.h
@@ -23,6 +23,7 @@
 #include <parallel_hashmap/phmap.h>
 
 #include <cstdint>
+#include <filesystem>
 
 #include "common/config.h"
 #include "util/hash_util.hpp" // IWYU pragma: keep
@@ -70,6 +71,8 @@ private:
     void _dump_query(TUniqueId query_id);
     void _dump_timeslice();
 
+    std::filesystem::path _log_dir = fmt::format("{}/pipe_tracing", getenv("LOG_DIR"));
+
     std::mutex _data_lock; // lock for map, not map items.
     phmap::flat_hash_map<TUniqueId, OneQueryTraces> _datas;
     std::mutex _tg_lock; //TODO: use an lockfree DS


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to