This is an automated email from the ASF dual-hosted git repository.

kxiao pushed a commit to branch branch-2.0
in repository https://gitbox.apache.org/repos/asf/doris.git

commit 3304f62d573a9c734503d8ebd80a79ba05527d9a
Author: Siyang Tang <[email protected]>
AuthorDate: Wed Jul 12 17:32:56 2023 +0800

    [fix](multi-table-load) fix memory leak when processing multi-table routine load (#21611)
    
    * use raw pointer to prevent cyclic reference
    
    * add comments
---
 be/src/io/fs/multi_table_pipe.h | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/be/src/io/fs/multi_table_pipe.h b/be/src/io/fs/multi_table_pipe.h
index b54e39a5a3..b225ea3061 100644
--- a/be/src/io/fs/multi_table_pipe.h
+++ b/be/src/io/fs/multi_table_pipe.h
@@ -32,7 +32,7 @@ class MultiTablePipe : public KafkaConsumerPipe {
 public:
     MultiTablePipe(std::shared_ptr<StreamLoadContext> ctx, size_t max_buffered_bytes = 1024 * 1024,
                    size_t min_chunk_size = 64 * 1024)
-            : KafkaConsumerPipe(max_buffered_bytes, min_chunk_size), _ctx(ctx) {}
+            : KafkaConsumerPipe(max_buffered_bytes, min_chunk_size), _ctx(ctx.get()) {}
 
     ~MultiTablePipe() override = default;
 
@@ -74,7 +74,10 @@ private:
     std::atomic<uint64_t> _unplanned_row_cnt {0}; // trigger plan request when exceed threshold
     std::atomic<uint64_t> _inflight_plan_cnt {0}; // how many plan fragment are executing?
     std::atomic<bool> _consume_finished {false};
-    std::shared_ptr<StreamLoadContext> _ctx;
+    // note: Use a raw pointer here to avoid a cyclic reference with StreamLoadContext.
+    // The life cycle of MultiTablePipe is controlled by StreamLoadContext, which means StreamLoadContext is created
+    // before MultiTablePipe and released after it, so a raw pointer is safe here.
+    StreamLoadContext* _ctx;
     Status _status; // save the first error status of all executing plan fragment
 #ifndef BE_TEST
     std::mutex _tablet_commit_infos_lock;


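For readers following along, below is a minimal, self-contained sketch of the kind of shared_ptr ownership cycle this patch breaks and how a non-owning raw pointer resolves it. The Context and Pipe types are simplified, hypothetical stand-ins, not the actual StreamLoadContext and MultiTablePipe classes.

// Minimal sketch (not Doris code): Context/Pipe stand in for
// StreamLoadContext/MultiTablePipe to show the ownership cycle.
#include <cstdio>
#include <memory>

namespace leaky {
struct Pipe;

struct Context {
    std::shared_ptr<Pipe> pipe;                             // context owns the pipe
    ~Context() { std::puts("leaky::Context destroyed"); }   // never printed
};

struct Pipe {
    std::shared_ptr<Context> ctx;                           // owning back-reference -> cycle
    ~Pipe() { std::puts("leaky::Pipe destroyed"); }         // never printed
};
}  // namespace leaky

namespace fixed {
struct Pipe;

struct Context {
    std::shared_ptr<Pipe> pipe;                             // context still owns the pipe
    ~Context() { std::puts("fixed::Context destroyed"); }
};

struct Pipe {
    // Non-owning back-pointer, mirroring this commit: the context outlives
    // the pipe by construction, so the raw pointer cannot dangle.
    Context* ctx = nullptr;
    ~Pipe() { std::puts("fixed::Pipe destroyed"); }
};
}  // namespace fixed

int main() {
    {
        auto ctx = std::make_shared<leaky::Context>();
        ctx->pipe = std::make_shared<leaky::Pipe>();
        ctx->pipe->ctx = ctx;        // completes the cycle
    }   // use counts never reach zero: nothing is destroyed (leak)

    {
        auto ctx = std::make_shared<fixed::Context>();
        ctx->pipe = std::make_shared<fixed::Pipe>();
        ctx->pipe->ctx = ctx.get();  // raw pointer, as in _ctx(ctx.get())
    }   // ctx drops to zero, pipe and context are both destroyed

    return 0;
}

A std::weak_ptr back-reference would also break the cycle; the commit opts for a plain raw pointer because, per the added comment, StreamLoadContext is created before MultiTablePipe and released after it, so the pointer cannot outlive its target.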
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
