This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-doris.git


The following commit(s) were added to refs/heads/master by this push:
     new d51545a952 [fix](ut)(memory-leak) Fix be asan ut failed and hdfs file reader memory leak (#8905)
d51545a952 is described below

commit d51545a9528927b8d615e9e02d9c6808e455a78b
Author: caiconghui <[email protected]>
AuthorDate: Fri Apr 8 00:07:00 2022 +0800

    [fix](ut)(memory-leak) Fix be asan ut failed and hdfs file reader memory leak (#8905)
---
 be/src/agent/task_worker_pool.cpp      | 11 ++++-------
 be/src/agent/task_worker_pool.h        |  5 ++---
 be/src/exec/hdfs_file_reader.cpp       |  1 +
 be/test/exec/hdfs_file_reader_test.cpp |  1 +
 4 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/be/src/agent/task_worker_pool.cpp b/be/src/agent/task_worker_pool.cpp
index 775f205330..f68196112a 100644
--- a/be/src/agent/task_worker_pool.cpp
+++ b/be/src/agent/task_worker_pool.cpp
@@ -1484,10 +1484,9 @@ void TaskWorkerPool::_release_snapshot_thread_callback() {
 }
 
 Status TaskWorkerPool::_get_tablet_info(const TTabletId tablet_id,
-                                             const TSchemaHash schema_hash, int64_t signature,
-                                             TTabletInfo* tablet_info) {
+                                        const TSchemaHash schema_hash, int64_t signature,
+                                        TTabletInfo* tablet_info) {
     Status status = Status::OK();
-
     tablet_info->__set_tablet_id(tablet_id);
     tablet_info->__set_schema_hash(schema_hash);
     OLAPStatus olap_status =
@@ -1520,8 +1519,7 @@ void TaskWorkerPool::_move_dir_thread_callback() {
         LOG(INFO) << "get move dir task, signature:" << agent_task_req.signature
                   << ", job id:" << move_dir_req.job_id;
         Status status =
-                _move_dir(move_dir_req.tablet_id, move_dir_req.schema_hash, move_dir_req.src,
-                          move_dir_req.job_id, true /* TODO */);
+                _move_dir(move_dir_req.tablet_id, move_dir_req.src, move_dir_req.job_id, true /* TODO */);
 
         if (!status.ok()) {
             LOG(WARNING) << "failed to move dir: " << move_dir_req.src
@@ -1546,8 +1544,7 @@ void TaskWorkerPool::_move_dir_thread_callback() {
     }
 }
 
-Status TaskWorkerPool::_move_dir(const TTabletId tablet_id, const TSchemaHash schema_hash,
-                                      const std::string& src, int64_t job_id, bool overwrite) {
+Status TaskWorkerPool::_move_dir(const TTabletId tablet_id, const std::string& src, int64_t job_id, bool overwrite) {
     TabletSharedPtr tablet =
             StorageEngine::instance()->tablet_manager()->get_tablet(tablet_id);
     if (tablet == nullptr) {
diff --git a/be/src/agent/task_worker_pool.h b/be/src/agent/task_worker_pool.h
index 4181d0ce50..55685137f9 100644
--- a/be/src/agent/task_worker_pool.h
+++ b/be/src/agent/task_worker_pool.h
@@ -203,10 +203,9 @@ private:
     void _handle_report(TReportRequest& request, ReportType type);
 
     Status _get_tablet_info(const TTabletId tablet_id, const TSchemaHash schema_hash,
-                                 int64_t signature, TTabletInfo* tablet_info);
+                            int64_t signature, TTabletInfo* tablet_info);
 
-    Status _move_dir(const TTabletId tablet_id, const TSchemaHash schema_hash,
-                          const std::string& src, int64_t job_id, bool overwrite);
+    Status _move_dir(const TTabletId tablet_id, const std::string& src, int64_t job_id, bool overwrite);
 
     OLAPStatus _check_migrate_request(const TStorageMediumMigrateReq& req, TabletSharedPtr& tablet,
                                       DataDir** dest_store);
diff --git a/be/src/exec/hdfs_file_reader.cpp b/be/src/exec/hdfs_file_reader.cpp
index d97d7c26cf..25e3a09638 100644
--- a/be/src/exec/hdfs_file_reader.cpp
+++ b/be/src/exec/hdfs_file_reader.cpp
@@ -65,6 +65,7 @@ Status HdfsFileReader::connect() {
         }
     }
     _hdfs_fs = hdfsBuilderConnect(hdfs_builder);
+    hdfsFreeBuilder(hdfs_builder);
     if (_hdfs_fs == nullptr) {
         std::stringstream ss;
         ss << "connect to hdfs failed. namenode address:" << _namenode
diff --git a/be/test/exec/hdfs_file_reader_test.cpp b/be/test/exec/hdfs_file_reader_test.cpp
index 11e4331f2e..f67e08a30c 100644
--- a/be/test/exec/hdfs_file_reader_test.cpp
+++ b/be/test/exec/hdfs_file_reader_test.cpp
@@ -29,6 +29,7 @@ TEST_F(HdfsFileReaderTest, test_connect_fail) {
     hdfsParams.fs_name = "hdfs://127.0.0.1:8888"; // An invalid address
     HdfsFileReader hdfs_file_reader(hdfsParams, "/user/foo/test.data", 0);
     Status status = hdfs_file_reader.open();
+    hdfs_file_reader.close();
     std::string msg = status.get_error_msg();
     ASSERT_TRUE(msg.find("Connection refused") >= 0);
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to