This is an automated email from the ASF dual-hosted git repository.

kxiao pushed a commit to branch branch-2.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.0 by this push:
     new fe7126d5504 [fix](inverted index)Support failover when index compaction failed (#29553) (#29669)
fe7126d5504 is described below

commit fe7126d5504454b2e85147c9755cbc7ef7ed9789
Author: qiye <[email protected]>
AuthorDate: Mon Jan 8 23:48:03 2024 +0800

    [fix](inverted index)Support failover when index compaction failed (#29553) (#29669)
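
    Summary of the change, as reflected in the diff below: previously a
    non-OK status from compact_column() was only logged, and a CLuceneError
    thrown out of it was not caught in this loop at all. Index compaction
    failures now fail over: both paths mark the affected column on every
    input rowset via Rowset::set_skip_index_compaction() and end the round
    with the new INVERTED_INDEX_COMPACTION_ERROR (-6010). On the next round,
    construct_output_rowset_writer() consults
    Rowset::is_skip_index_compaction() and leaves the marked column out of
    index compaction, so the compaction itself can still succeed.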
---
 be/src/common/status.h                             |   1 +
 be/src/olap/compaction.cpp                         |  88 ++++--
 be/src/olap/rowset/rowset.h                        |  11 +
 .../segment_v2/inverted_index_compaction.cpp       |  16 +-
 .../test_index_compaction_fault_injection.out      | 291 +++++++++++++++++++
 .../test_index_compaction_fault_injection.groovy   | 307 +++++++++++++++++++++
 6 files changed, 689 insertions(+), 25 deletions(-)

diff --git a/be/src/common/status.h b/be/src/common/status.h
index 0b72329e568..f461f8f15e0 100644
--- a/be/src/common/status.h
+++ b/be/src/common/status.h
@@ -272,6 +272,7 @@ E(INVERTED_INDEX_RENAME_FILE_FAILED, -6006);
 E(INVERTED_INDEX_EVALUATE_SKIPPED, -6007);
 E(INVERTED_INDEX_BUILD_WAITTING, -6008);
 E(INVERTED_INDEX_NOT_IMPLEMENTED, -6009);
+E(INVERTED_INDEX_COMPACTION_ERROR, -6010);
 E(KEY_NOT_FOUND, -7000);
 E(KEY_ALREADY_EXISTS, -7001);
 E(ENTRY_NOT_FOUND, -7002);
diff --git a/be/src/olap/compaction.cpp b/be/src/olap/compaction.cpp
index 2cbd354b6de..b4b34cbacb8 100644
--- a/be/src/olap/compaction.cpp
+++ b/be/src/olap/compaction.cpp
@@ -401,7 +401,8 @@ Status Compaction::do_compaction_impl(int64_t permits) {
     // 3. check correctness
     RETURN_IF_ERROR(check_correctness(stats));
 
-    if (_input_row_num > 0 && stats.rowid_conversion && config::inverted_index_compaction_enable) {
+    if (_input_row_num > 0 && stats.rowid_conversion && config::inverted_index_compaction_enable &&
+        !ctx.skip_inverted_index.empty()) {
         OlapStopWatch inverted_watch;
 
         // check rowid_conversion correctness
@@ -480,25 +481,58 @@ Status Compaction::do_compaction_impl(int64_t permits) {
                       << ". tablet=" << _tablet->full_name()
                       << ", source index size=" << src_segment_num
                       << ", destination index size=" << dest_segment_num << 
".";
+            Status status = Status::OK();
             std::for_each(
                     ctx.skip_inverted_index.cbegin(), ctx.skip_inverted_index.cend(),
                     [&src_segment_num, &dest_segment_num, &index_writer_path, &src_index_files,
                      &dest_index_files, &fs, &tablet_path, &trans_vec, &dest_segment_num_rows,
-                     this](int32_t column_uniq_id) {
-                        auto st = compact_column(
-                                _cur_tablet_schema->get_inverted_index(column_uniq_id)->index_id(),
-                                src_segment_num, dest_segment_num, src_index_files,
-                                dest_index_files, fs, index_writer_path, tablet_path, trans_vec,
-                                dest_segment_num_rows);
-                        if (!st.ok()) {
-                            LOG(ERROR) << "failed to do index compaction"
-                                       << ". tablet=" << _tablet->full_name()
-                                       << ". column uniq id=" << column_uniq_id << ". index_id= "
-                                       << _cur_tablet_schema->get_inverted_index(column_uniq_id)
-                                                  ->index_id();
+                     &status, this](int32_t column_uniq_id) {
+                        auto index_id =
+                                _cur_tablet_schema->get_inverted_index(column_uniq_id)->index_id();
+                        try {
+                            auto st = compact_column(index_id, src_segment_num, dest_segment_num,
+                                                     src_index_files, dest_index_files, fs,
+                                                     index_writer_path, tablet_path, trans_vec,
+                                                     dest_segment_num_rows);
+                            if (!st.ok()) {
+                                LOG(WARNING) << "failed to do index compaction"
+                                             << ". tablet=" << _tablet->full_name()
+                                             << ". column uniq id=" << column_uniq_id
+                                             << ". index_id=" << index_id;
+                                for (auto& rowset : _input_rowsets) {
+                                    rowset->set_skip_index_compaction(column_uniq_id);
+                                    LOG(INFO) << "mark skipping inverted index compaction next time"
+                                              << ". tablet=" << _tablet->full_name()
+                                              << ", rowset=" << rowset->rowset_id()
+                                              << ", column uniq id=" << column_uniq_id
+                                              << ", index_id=" << index_id;
+                                }
+                                status = Status::Error<ErrorCode::INVERTED_INDEX_COMPACTION_ERROR>(
+                                        st.msg());
+                            }
+                        } catch (CLuceneError& e) {
+                            LOG(WARNING) << "failed to do index compaction"
+                                         << ". tablet=" << _tablet->full_name()
+                                         << ", column uniq id=" << column_uniq_id
+                                         << ", index_id=" << index_id;
+                            for (auto& rowset : _input_rowsets) {
+                                rowset->set_skip_index_compaction(column_uniq_id);
+                                LOG(INFO) << "mark skipping inverted index compaction next time"
+                                          << ". tablet=" << _tablet->full_name()
+                                          << ", rowset=" << rowset->rowset_id()
+                                          << ", column uniq id=" << column_uniq_id
+                                          << ", index_id=" << index_id;
+                            }
+                            status = Status::Error<ErrorCode::INVERTED_INDEX_COMPACTION_ERROR>(
+                                    e.what());
                         }
                     });
 
+            // check index compaction status. If status is not ok, we should return error and end this compaction round.
+            if (!status.ok()) {
+                return status;
+            }
+
             LOG(INFO) << "succeed to do index compaction"
                       << ". tablet=" << _tablet->full_name()
                       << ", input row number=" << _input_row_num
@@ -569,9 +603,9 @@ Status Compaction::construct_output_rowset_writer(RowsetWriterContext& ctx, bool
     if (config::inverted_index_compaction_enable &&
         ((_tablet->keys_type() == KeysType::UNIQUE_KEYS ||
           _tablet->keys_type() == KeysType::DUP_KEYS))) {
-        for (auto& index : _cur_tablet_schema->indexes()) {
+        for (const auto& index : _cur_tablet_schema->indexes()) {
             if (index.index_type() == IndexType::INVERTED) {
-                auto unique_id = index.col_unique_ids()[0];
+                auto col_unique_id = index.col_unique_ids()[0];
                //NOTE: here src_rs may be in building index progress, so it would not contain inverted index info.
                 bool all_have_inverted_index = std::all_of(
                        _input_rowsets.begin(), _input_rowsets.end(), [&](const auto& src_rs) {
@@ -582,13 +616,21 @@ Status Compaction::construct_output_rowset_writer(RowsetWriterContext& ctx, bool
                                             << "] rowset is null, will skip index compaction";
                                 return false;
                             }
+                            if (rowset->is_skip_index_compaction(col_unique_id)) {
+                                LOG(WARNING)
+                                        << "tablet[" << _tablet->tablet_id() << "] rowset["
+                                        << rowset->rowset_id() << "] column_unique_id["
+                                        << col_unique_id
+                                        << "] skip inverted index compaction due to last failure";
+                                return false;
+                            }
                             auto fs = rowset->rowset_meta()->fs();
 
-                            auto index_meta =
-                                    rowset->tablet_schema()->get_inverted_index(unique_id);
+                            const auto* index_meta =
+                                    rowset->tablet_schema()->get_inverted_index(col_unique_id);
                             if (index_meta == nullptr) {
                                 LOG(WARNING) << "tablet[" << _tablet->tablet_id()
-                                             << "] index_unique_id[" << unique_id
+                                             << "] column_unique_id[" << col_unique_id
                                              << "] index meta is null, will skip index compaction";
                                 return false;
                             }
@@ -606,7 +648,7 @@ Status Compaction::construct_output_rowset_writer(RowsetWriterContext& ctx, bool
                                 }
                                 if (!exists) {
                                     LOG(WARNING) << "tablet[" << _tablet->tablet_id()
-                                                 << "] index_unique_id[" << unique_id << "],"
+                                                 << "] column_unique_id[" << col_unique_id << "],"
                                                  << inverted_index_src_file_path
                                                  << " is not exists, will skip index compaction";
                                     return false;
@@ -622,7 +664,7 @@ Status Compaction::construct_output_rowset_writer(RowsetWriterContext& ctx, bool
                                 }
                                 if (file_size == 0) {
                                     LOG(WARNING) << "tablet[" << _tablet->tablet_id()
-                                                 << "] index_unique_id[" << unique_id << "],"
+                                                 << "] column_unique_id[" << col_unique_id << "],"
                                                  << inverted_index_src_file_path
                                                  << " is empty file, will skip index compaction";
                                     return false;
@@ -644,7 +686,7 @@ Status Compaction::construct_output_rowset_writer(RowsetWriterContext& ctx, bool
                                 // bkd index will write at least 3 files
                                 if (files.size() < 3) {
                                     LOG(WARNING) << "tablet[" << _tablet->tablet_id()
-                                                 << "] index_unique_id[" << unique_id << "],"
+                                                 << "] column_unique_id[" << col_unique_id << "],"
                                                  << inverted_index_src_file_path
                                                  << " is corrupted, will skip index compaction";
                                     return false;
@@ -653,8 +695,8 @@ Status Compaction::construct_output_rowset_writer(RowsetWriterContext& ctx, bool
                             return true;
                         });
                 if (all_have_inverted_index &&
-                    field_is_slice_type(_cur_tablet_schema->column_by_uid(unique_id).type())) {
-                    ctx.skip_inverted_index.insert(unique_id);
+                    field_is_slice_type(_cur_tablet_schema->column_by_uid(col_unique_id).type())) {
+                    ctx.skip_inverted_index.insert(col_unique_id);
                 }
             }
         }
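
Both failure paths above (a non-OK Status from compact_column() and a CLuceneError thrown out of it) are handled the same way: every input rowset is marked via set_skip_index_compaction() and the failure surfaces as the new INVERTED_INDEX_COMPACTION_ERROR (-6010). This is why the regression test below treats the compaction HTTP status "e-6010" as an expected outcome rather than a hard failure.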
diff --git a/be/src/olap/rowset/rowset.h b/be/src/olap/rowset/rowset.h
index 7ac31e608e4..7c932522006 100644
--- a/be/src/olap/rowset/rowset.h
+++ b/be/src/olap/rowset/rowset.h
@@ -304,6 +304,14 @@ public:
 
     [[nodiscard]] virtual Status add_to_binlog() { return Status::OK(); }
 
+    // is skip index compaction this time
+    bool is_skip_index_compaction(int32_t column_id) const {
+        return skip_index_compaction.find(column_id) != skip_index_compaction.end();
+    }
+
+    // set skip index compaction next time
+    void set_skip_index_compaction(int32_t column_id) { skip_index_compaction.insert(column_id); }
+
 protected:
     friend class RowsetFactory;
 
@@ -340,6 +348,9 @@ protected:
     // rowset state machine
     RowsetStateMachine _rowset_state_machine;
     std::atomic<uint64_t> _delayed_expired_timestamp = 0;
+
+    // <column_uniq_id>, skip index compaction
+    std::set<int32_t> skip_index_compaction;
 };
 
 } // namespace doris
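
A minimal standalone sketch (not from the commit) of how the two new hooks are intended to cooperate across compaction rounds; the Rowset below is a stand-in reduced to the members added above, the column id is hypothetical, and the round logic condenses the compaction.cpp changes:

#include <cstdint>
#include <iostream>
#include <memory>
#include <set>
#include <vector>

// Stand-in for doris::Rowset, reduced to the members added in this diff.
struct Rowset {
    bool is_skip_index_compaction(int32_t column_id) const {
        return skip_index_compaction.find(column_id) != skip_index_compaction.end();
    }
    void set_skip_index_compaction(int32_t column_id) { skip_index_compaction.insert(column_id); }
    std::set<int32_t> skip_index_compaction; // <column_uniq_id>
};

int main() {
    std::vector<std::shared_ptr<Rowset>> input_rowsets = {std::make_shared<Rowset>(),
                                                          std::make_shared<Rowset>()};
    const int32_t column_uniq_id = 10; // hypothetical column with an inverted index

    // Round 1: compact_column() fails for this column, so every input rowset is marked.
    for (auto& rowset : input_rowsets) {
        rowset->set_skip_index_compaction(column_uniq_id);
    }

    // Round 2: construct_output_rowset_writer() schedules index compaction for the
    // column only if no input rowset carries the mark.
    bool all_have_inverted_index = true;
    for (const auto& rowset : input_rowsets) {
        if (rowset->is_skip_index_compaction(column_uniq_id)) {
            all_have_inverted_index = false; // mirrors the early "return false" above
            break;
        }
    }

    // Despite its name, ctx.skip_inverted_index holds the columns that WILL get
    // index compaction in do_compaction_impl().
    std::set<int32_t> skip_inverted_index;
    if (all_have_inverted_index) {
        skip_inverted_index.insert(column_uniq_id);
    }
    std::cout << "index compaction scheduled: " << std::boolalpha
              << !skip_inverted_index.empty() << '\n'; // prints "false"
    return 0;
}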
diff --git a/be/src/olap/rowset/segment_v2/inverted_index_compaction.cpp b/be/src/olap/rowset/segment_v2/inverted_index_compaction.cpp
index 8458de9e7e3..b04edd6eb83 100644
--- a/be/src/olap/rowset/segment_v2/inverted_index_compaction.cpp
+++ b/be/src/olap/rowset/segment_v2/inverted_index_compaction.cpp
@@ -21,6 +21,7 @@
 
 #include "inverted_index_compound_directory.h"
 #include "inverted_index_compound_reader.h"
+#include "util/debug_points.h"
 
 namespace doris::segment_v2 {
 Status compact_column(int32_t index_id, int src_segment_num, int dest_segment_num,
@@ -29,11 +30,22 @@ Status compact_column(int32_t index_id, int src_segment_num, int dest_segment_nu
                       std::string index_writer_path, std::string tablet_path,
                       std::vector<std::vector<std::pair<uint32_t, uint32_t>>> trans_vec,
                       std::vector<uint32_t> dest_segment_num_rows) {
+    DBUG_EXECUTE_IF("index_compaction_compact_column_throw_error", {
+        if (index_id % 2 == 0) {
+            _CLTHROWA(CL_ERR_IO, "debug point: test throw error in index compaction");
+        }
+    })
+    DBUG_EXECUTE_IF("index_compaction_compact_column_status_not_ok", {
+        if (index_id % 2 == 1) {
+            return Status::Error<ErrorCode::INVERTED_INDEX_COMPACTION_ERROR>(
+                    "debug point: index compaction error");
+        }
+    })
     lucene::store::Directory* dir =
             DorisCompoundDirectoryFactory::getDirectory(fs, index_writer_path.c_str());
     lucene::analysis::SimpleAnalyzer<char> analyzer;
-    auto index_writer = _CLNEW lucene::index::IndexWriter(dir, &analyzer, true /* create */,
-                                                          true /* closeDirOnShutdown */);
+    auto* index_writer = _CLNEW lucene::index::IndexWriter(dir, &analyzer, true /* create */,
+                                                           true /* closeDirOnShutdown */);
 
     // get compound directory src_index_dirs
     std::vector<lucene::store::Directory*> src_index_dirs(src_segment_num);
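
The two debug points split the injected failures by index id parity: index_compaction_compact_column_throw_error throws a CLuceneError for even index ids, exercising the new catch block in compaction.cpp, while index_compaction_compact_column_status_not_ok returns a non-OK Status for odd ids, exercising the status path. The regression test below toggles them per BE via GetDebugPoint().enableDebugPointForAllBEs(...) and disableDebugPointForAllBEs(...).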
diff --git a/regression-test/data/fault_injection_p0/test_index_compaction_fault_injection.out b/regression-test/data/fault_injection_p0/test_index_compaction_fault_injection.out
new file mode 100644
index 00000000000..925ac94a1ac
--- /dev/null
+++ b/regression-test/data/fault_injection_p0/test_index_compaction_fault_injection.out
@@ -0,0 +1,291 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sql --
+1      andy    andy love apple 100
+1      bason   bason hate pear 99
+2      andy    andy love apple 100
+2      bason   bason hate pear 99
+3      andy    andy love apple 100
+3      bason   bason hate pear 99
+
+-- !sql --
+1      andy    andy love apple 100
+2      andy    andy love apple 100
+3      andy    andy love apple 100
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      andy    andy love apple 100
+1      bason   bason hate pear 99
+2      andy    andy love apple 100
+2      bason   bason hate pear 99
+3      andy    andy love apple 100
+3      bason   bason hate pear 99
+
+-- !sql --
+1      andy    andy love apple 100
+2      andy    andy love apple 100
+3      andy    andy love apple 100
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      andy    andy love apple 100
+1      andy    andy love apple 100
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+2      andy    andy love apple 100
+2      andy    andy love apple 100
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      andy    andy love apple 100
+3      andy    andy love apple 100
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      andy    andy love apple 100
+1      andy    andy love apple 100
+2      andy    andy love apple 100
+2      andy    andy love apple 100
+3      andy    andy love apple 100
+3      andy    andy love apple 100
+
+-- !sql --
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      andy    andy love apple 100
+1      andy    andy love apple 100
+1      andy    andy love apple 100
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+2      andy    andy love apple 100
+2      andy    andy love apple 100
+2      andy    andy love apple 100
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      andy    andy love apple 100
+3      andy    andy love apple 100
+3      andy    andy love apple 100
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      andy    andy love apple 100
+1      andy    andy love apple 100
+1      andy    andy love apple 100
+2      andy    andy love apple 100
+2      andy    andy love apple 100
+2      andy    andy love apple 100
+3      andy    andy love apple 100
+3      andy    andy love apple 100
+3      andy    andy love apple 100
+
+-- !sql --
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      andy    andy love apple 100
+1      andy    andy love apple 100
+1      andy    andy love apple 100
+1      andy    andy love apple 100
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+2      andy    andy love apple 100
+2      andy    andy love apple 100
+2      andy    andy love apple 100
+2      andy    andy love apple 100
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      andy    andy love apple 100
+3      andy    andy love apple 100
+3      andy    andy love apple 100
+3      andy    andy love apple 100
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      andy    andy love apple 100
+1      andy    andy love apple 100
+1      andy    andy love apple 100
+1      andy    andy love apple 100
+2      andy    andy love apple 100
+2      andy    andy love apple 100
+2      andy    andy love apple 100
+2      andy    andy love apple 100
+3      andy    andy love apple 100
+3      andy    andy love apple 100
+3      andy    andy love apple 100
+3      andy    andy love apple 100
+
+-- !sql --
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
diff --git a/regression-test/suites/fault_injection_p0/test_index_compaction_fault_injection.groovy b/regression-test/suites/fault_injection_p0/test_index_compaction_fault_injection.groovy
new file mode 100644
index 00000000000..c3fd4f70272
--- /dev/null
+++ b/regression-test/suites/fault_injection_p0/test_index_compaction_fault_injection.groovy
@@ -0,0 +1,307 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_index_compaction_failure_injection", "nonConcurrent") {
+    def tableName = "test_index_compaction_failure_injection_dups"
+    def backendId_to_backendIP = [:]
+    def backendId_to_backendHttpPort = [:]
+    getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+    boolean disableAutoCompaction = false
+  
+    def set_be_config = { key, value ->
+        for (String backend_id: backendId_to_backendIP.keySet()) {
+            def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value)
+            logger.info("update config: code=" + code + ", out=" + out + ", err=" + err)
+        }
+    }
+
+    def trigger_full_compaction_on_tablets = { String[][] tablets ->
+        for (String[] tablet : tablets) {
+            String tablet_id = tablet[0]
+            String backend_id = tablet[2]
+            int times = 1
+
+            String compactionStatus;
+            do{
+                def (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
+                ++times
+                sleep(2000)
+                compactionStatus = parseJson(out.trim()).status.toLowerCase();
+            } while (compactionStatus!="success" && times<=10 && compactionStatus!="e-6010")
+
+
+            if (compactionStatus == "fail") {
+                assertEquals(disableAutoCompaction, false)
+                logger.info("Compaction was done automatically!")
+            }
+            if (disableAutoCompaction && compactionStatus!="e-6010") {
+                assertEquals("success", compactionStatus)
+            }
+        }
+    }
+
+    def wait_full_compaction_done = { String[][] tablets ->
+        for (String[] tablet in tablets) {
+            boolean running = true
+            do {
+                Thread.sleep(1000)
+                String tablet_id = tablet[0]
+                String backend_id = tablet[2]
+                def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
+                assertEquals(code, 0)
+                def compactionStatus = parseJson(out.trim())
+                assertEquals("success", compactionStatus.status.toLowerCase())
+                running = compactionStatus.run_status
+            } while (running)
+        }
+    }
+
+    def get_rowset_count = {String[][] tablets ->
+        int rowsetCount = 0
+        for (String[] tablet in tablets) {
+            def compactionStatusUrlIndex = 18
+            def (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex])
+            logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err)
+            assertEquals(code, 0)
+            def tabletJson = parseJson(out.trim())
+            assert tabletJson.rowsets instanceof List
+            rowsetCount +=((List<String>) tabletJson.rowsets).size()
+        }
+        return rowsetCount
+    }
+
+    def check_config = { String key, String value ->
+        for (String backend_id: backendId_to_backendIP.keySet()) {
+            def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
+            logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
+            assertEquals(code, 0)
+            def configList = parseJson(out.trim())
+            assert configList instanceof List
+            for (Object ele in (List) configList) {
+                assert ele instanceof List<String>
+                if (((List<String>) ele)[0] == key) {
+                    assertEquals(value, ((List<String>) ele)[2])
+                }
+            }
+        }
+    }
+
+    def insert_data = { -> 
+        sql """ INSERT INTO ${tableName} VALUES (1, "andy", "andy love apple", 
100); """
+        sql """ INSERT INTO ${tableName} VALUES (1, "bason", "bason hate 
pear", 99); """
+        sql """ INSERT INTO ${tableName} VALUES (2, "andy", "andy love apple", 
100); """
+        sql """ INSERT INTO ${tableName} VALUES (2, "bason", "bason hate 
pear", 99); """
+        sql """ INSERT INTO ${tableName} VALUES (3, "andy", "andy love apple", 
100); """
+        sql """ INSERT INTO ${tableName} VALUES (3, "bason", "bason hate 
pear", 99); """
+    }
+
+    def run_sql = { -> 
+        qt_sql """ select * from ${tableName} order by id, name, hobbies, 
score """
+        qt_sql """ select * from ${tableName} where name match "andy" order by 
id, name, hobbies, score """
+        qt_sql """ select * from ${tableName} where hobbies match "pear" order 
by id, name, hobbies, score """
+        qt_sql """ select * from ${tableName} where score < 100 order by id, 
name, hobbies, score """
+    }
+
+    def run_test = { String[][] tablets ->
+        insert_data.call()
+
+        run_sql.call()
+
+        int replicaNum = 1
+        // before full compaction, there are 7 rowsets.
+        int rowsetCount = get_rowset_count.call(tablets);
+        assert (rowsetCount == 7 * replicaNum)
+
+        // trigger full compaction for all tablets with fault injection
+        try {
+            GetDebugPoint().enableDebugPointForAllBEs("index_compaction_compact_column_throw_error")
+            logger.info("trigger_full_compaction_on_tablets with fault injection: index_compaction_compact_column_throw_error")
+            trigger_full_compaction_on_tablets.call(tablets)
+        } finally {
+            GetDebugPoint().disableDebugPointForAllBEs("index_compaction_compact_column_throw_error")
+        }
+        // after fault injection, there are still 7 rowsets.
+        rowsetCount = get_rowset_count.call(tablets);
+        assert (rowsetCount == 7 * replicaNum)
+
+        logger.info("trigger_full_compaction_on_tablets normally")
+        // trigger full compactions for all tablets in ${tableName}
+        // this time, index compaction of some columns will be skipped because of the fault injection
+        trigger_full_compaction_on_tablets.call(tablets)
+
+        // wait for full compaction done
+        wait_full_compaction_done.call(tablets)
+
+        // after full compaction, there is only 1 rowset.
+        rowsetCount = get_rowset_count.call(tablets);
+        assert (rowsetCount == 1 * replicaNum)
+
+        run_sql.call()
+
+        // insert more data and trigger full compaction again
+        insert_data.call()
+
+        run_sql.call()
+
+        // insert 6 rows, so there are 7 rowsets.
+        rowsetCount = get_rowset_count.call(tablets);
+        assert (rowsetCount == 7 * replicaNum)
+
+        // trigger full compaction for all tablets with fault injection
+        try {
+            GetDebugPoint().enableDebugPointForAllBEs("index_compaction_compact_column_status_not_ok")
+            logger.info("trigger_full_compaction_on_tablets with fault injection: index_compaction_compact_column_status_not_ok")
+            trigger_full_compaction_on_tablets.call(tablets)
+        } finally {
+            GetDebugPoint().disableDebugPointForAllBEs("index_compaction_compact_column_status_not_ok")
+        }
+
+        // insert more data
+        insert_data.call()
+
+        // after fault injection, there are still 7 rowsets.
+        // and we insert 6 rows, so there are 13 rowsets.
+        rowsetCount = get_rowset_count.call(tablets);
+        assert (rowsetCount == 13 * replicaNum)
+
+        logger.info("trigger_full_compaction_on_tablets normally")
+        // trigger full compactions for all tablets in ${tableName}
+        // this time, index compaction of some columns will be skipped because of the fault injection
+        trigger_full_compaction_on_tablets.call(tablets)
+
+        // wait for full compaction done
+        wait_full_compaction_done.call(tablets)
+
+        // after full compaction, there is only 1 rowset.
+        rowsetCount = get_rowset_count.call(tablets);
+        assert (rowsetCount == 1 * replicaNum)
+
+        run_sql.call()
+
+        // insert more data and trigger full compaction again
+        insert_data.call()
+        
+        // insert 6 rows, so there are 7 rowsets.
+        rowsetCount = get_rowset_count.call(tablets);
+        assert (rowsetCount == 7 * replicaNum)
+
+        // trigger full compaction for all tablets normally
+        // this time, index compaction will be done successfully
+        logger.info("trigger_full_compaction_on_tablets normally")
+        trigger_full_compaction_on_tablets.call(tablets)
+
+        // after full compaction, there is only 1 rowset.
+        rowsetCount = get_rowset_count.call(tablets);
+        assert (rowsetCount == 1 * replicaNum)
+
+        run_sql.call()
+    }
+
+    boolean invertedIndexCompactionEnable = false
+    boolean has_update_be_config = false
+    try {
+        String backend_id;
+        backend_id = backendId_to_backendIP.keySet()[0]
+        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
+
+        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
+        assertEquals(code, 0)
+        def configList = parseJson(out.trim())
+        assert configList instanceof List
+
+        for (Object ele in (List) configList) {
+            assert ele instanceof List<String>
+            if (((List<String>) ele)[0] == "inverted_index_compaction_enable") {
+                invertedIndexCompactionEnable = Boolean.parseBoolean(((List<String>) ele)[2])
+                logger.info("inverted_index_compaction_enable: ${((List<String>) ele)[2]}")
+            }
+            if (((List<String>) ele)[0] == "disable_auto_compaction") {
+                disableAutoCompaction = Boolean.parseBoolean(((List<String>) ele)[2])
+                logger.info("disable_auto_compaction: ${((List<String>) ele)[2]}")
+            }
+        }
+        set_be_config.call("inverted_index_compaction_enable", "true")
+        has_update_be_config = true
+        // check updated config
+        check_config.call("inverted_index_compaction_enable", "true");
+
+
+        /**
+        * test for duplicated key table
+        */
+        sql """ DROP TABLE IF EXISTS ${tableName}; """
+        sql """
+            CREATE TABLE ${tableName} (
+                `id` int(11) NULL,
+                `name` varchar(255) NULL,
+                `hobbies` text NULL,
+                `score` int(11) NULL,
+                index index_name (name) using inverted,
+                index index_hobbies (hobbies) using inverted properties("parser"="english"),
+                index index_score (score) using inverted
+            ) ENGINE=OLAP
+            DUPLICATE KEY(`id`)
+            COMMENT 'OLAP'
+            DISTRIBUTED BY HASH(`id`) BUCKETS 1
+            PROPERTIES ( "replication_num" = "1", "disable_auto_compaction" = 
"true");
+        """
+
+        //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,PathHash,MetaUrl,CompactionStatus
+        String[][] tablets = sql """ show tablets from ${tableName}; """
+
+        run_test.call(tablets)
+
+        /**
+        * test for unique key table
+        */
+        tableName = "test_index_compaction_failure_injection_unique"
+
+        sql """ DROP TABLE IF EXISTS ${tableName}; """
+        sql """
+            CREATE TABLE ${tableName} (
+                `id` int(11) NULL,
+                `name` varchar(255) NULL,
+                `hobbies` text NULL,
+                `score` int(11) NULL,
+                index index_name (name) using inverted,
+                index index_hobbies (hobbies) using inverted properties("parser"="english"),
+                index index_score (score) using inverted
+            ) ENGINE=OLAP
+            UNIQUE KEY(`id`)
+            COMMENT 'OLAP'
+            DISTRIBUTED BY HASH(`id`) BUCKETS 1
+            PROPERTIES ( 
+                "replication_num" = "1",
+                "disable_auto_compaction" = "true",
+                "enable_unique_key_merge_on_write" = "true"
+            );
+        """
+
+        tablets = sql """ show tablets from ${tableName}; """
+        run_test.call(tablets)
+
+    } finally {
+        if (has_update_be_config) {
+            set_be_config.call("inverted_index_compaction_enable", invertedIndexCompactionEnable.toString())
+        }
+    }
+}
\ No newline at end of file


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

