This is an automated email from the ASF dual-hosted git repository.

gabriellee pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 875631433d4 [minor](debug) Delete unused debug logs (#50054)
875631433d4 is described below

commit 875631433d412c6f14e899d088e436ccb90b2546
Author: Gabriel <[email protected]>
AuthorDate: Wed Apr 16 08:06:37 2025 +0800

    [minor](debug) Delete unused debug logs (#50054)
---
 be/src/pipeline/pipeline_task.cpp  |  2 --
 be/src/pipeline/pipeline_task.h    | 29 -----------------------------
 be/src/pipeline/task_scheduler.cpp |  2 --
 3 files changed, 33 deletions(-)

diff --git a/be/src/pipeline/pipeline_task.cpp b/be/src/pipeline/pipeline_task.cpp
index 3d3d31964b8..1b75b6d8dc1 100644
--- a/be/src/pipeline/pipeline_task.cpp
+++ b/be/src/pipeline/pipeline_task.cpp
@@ -81,8 +81,6 @@ PipelineTask::PipelineTask(PipelinePtr& pipeline, uint32_t task_id, RuntimeState
           _execution_dep(state->get_query_ctx()->get_execution_dependency()),
           _memory_sufficient_dependency(state->get_query_ctx()->get_memory_sufficient_dependency()),
           _pipeline_name(_pipeline->name()) {
-    _pipeline_task_watcher.start();
-
     if (!_shared_state_map.contains(_sink->dests_id().front())) {
         auto shared_state = _sink->create_shared_state();
         if (shared_state) {
diff --git a/be/src/pipeline/pipeline_task.h b/be/src/pipeline/pipeline_task.h
index b1eedcc3878..3e18777d572 100644
--- a/be/src/pipeline/pipeline_task.h
+++ b/be/src/pipeline/pipeline_task.h
@@ -152,32 +152,6 @@ public:
     bool is_revoking() const;
     bool set_running(bool running) { return _running.exchange(running); }
 
-    bool is_exceed_debug_timeout() {
-        if (_has_exceed_timeout) {
-            return true;
-        }
-        // If enable_debug_log_timeout_secs <= 0, then disable the log
-        if (_pipeline_task_watcher.elapsed_time() >
-            config::enable_debug_log_timeout_secs * 1000L * 1000L * 1000L) {
-            _has_exceed_timeout = true;
-            return true;
-        }
-        return false;
-    }
-
-    void log_detail_if_need() {
-        if (config::enable_debug_log_timeout_secs < 1) {
-            return;
-        }
-        if (is_exceed_debug_timeout()) {
-            LOG(INFO) << "query id|instanceid " << print_id(_state->query_id()) << "|"
-                      << print_id(_state->fragment_instance_id())
-                      << " current pipeline exceed run time "
-                      << config::enable_debug_log_timeout_secs << " seconds. "
-                      << "/n task detail:" << debug_string();
-        }
-    }
-
     RuntimeState* runtime_state() const { return _state; }
 
     std::string task_name() const { return fmt::format("task{}({})", _index, _pipeline->_name); }
@@ -215,7 +189,6 @@ private:
     const TUniqueId _query_id;
     const uint32_t _index;
     PipelinePtr _pipeline;
-    bool _has_exceed_timeout = false;
     bool _opened;
     RuntimeState* _state = nullptr;
     int _core_id = -1;
@@ -254,8 +227,6 @@ private:
     RuntimeProfile::Counter* _memory_reserve_times = nullptr;
     RuntimeProfile::Counter* _memory_reserve_failed_times = nullptr;
 
-    MonotonicStopWatch _pipeline_task_watcher;
-
     Operators _operators; // left is _source, right is _root
     OperatorXBase* _source;
     OperatorXBase* _root;
diff --git a/be/src/pipeline/task_scheduler.cpp b/be/src/pipeline/task_scheduler.cpp
index 1e82ef262b6..fbbf3720098 100644
--- a/be/src/pipeline/task_scheduler.cpp
+++ b/be/src/pipeline/task_scheduler.cpp
@@ -132,8 +132,6 @@ void TaskScheduler::_do_work(int index) {
             }
         }};
         task->set_task_queue(&_task_queue);
-        task->log_detail_if_need();
-
         bool canceled = fragment_context->is_canceled();
 
         // Close task if canceled


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to