This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch branch-4.0-preview
in repository https://gitbox.apache.org/repos/asf/doris.git

commit 77916e37a279b081fef1bbdd0dd193272651a022
Author: huanghaibin <[email protected]>
AuthorDate: Fri Apr 19 20:41:57 2024 +0800

    [fix](mow) cloud full compaction should check unique key when trying to calculate delete bitmap (#33859)
    
    Currently, cloud full compaction on a duplicate-key table causes the BE to core dump, because it tries to calculate a delete bitmap, which only exists for merge-on-write (MOW) tables.
---
 be/src/cloud/cloud_full_compaction.cpp | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/be/src/cloud/cloud_full_compaction.cpp b/be/src/cloud/cloud_full_compaction.cpp
index b144a0a7c42..972fd396497 100644
--- a/be/src/cloud/cloud_full_compaction.cpp
+++ b/be/src/cloud/cloud_full_compaction.cpp
@@ -200,10 +200,13 @@ Status CloudFullCompaction::modify_rowsets() {
     compaction_job->add_output_rowset_ids(_output_rowset->rowset_id().to_string());
 
     DeleteBitmapPtr output_rowset_delete_bitmap = nullptr;
-    int64_t initiator =
-            boost::hash_range(_uuid.begin(), _uuid.end()) & std::numeric_limits<int64_t>::max();
-    RETURN_IF_ERROR(_cloud_full_compaction_update_delete_bitmap(initiator));
-    compaction_job->set_delete_bitmap_lock_initiator(initiator);
+    if (_tablet->keys_type() == KeysType::UNIQUE_KEYS &&
+        _tablet->enable_unique_key_merge_on_write()) {
+        int64_t initiator =
+                boost::hash_range(_uuid.begin(), _uuid.end()) & std::numeric_limits<int64_t>::max();
+        RETURN_IF_ERROR(_cloud_full_compaction_update_delete_bitmap(initiator));
+        compaction_job->set_delete_bitmap_lock_initiator(initiator);
+        compaction_job->set_delete_bitmap_lock_initiator(initiator);
+    }
 
     cloud::FinishTabletJobResponse resp;
     auto st = _engine.meta_mgr().commit_tablet_job(job, &resp);
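
For readers skimming the patch, here is a minimal standalone C++ sketch of the guard the fix adds. The KeysType enum, Tablet struct, and function names below are simplified stand-ins for illustration only, not Doris APIs; the real modify_rowsets() operates on rowsets and the compaction job rather than printing.

#include <iostream>

enum class KeysType { DUP_KEYS, AGG_KEYS, UNIQUE_KEYS };

// Stand-in for the tablet state the guard inspects (hypothetical, not Doris code).
struct Tablet {
    KeysType keys_type;
    bool unique_key_merge_on_write; // MOW flag; only meaningful for UNIQUE_KEYS
};

bool is_merge_on_write(const Tablet& t) {
    return t.keys_type == KeysType::UNIQUE_KEYS && t.unique_key_merge_on_write;
}

void modify_rowsets(const Tablet& t) {
    if (is_merge_on_write(t)) {
        // Only MOW unique-key tablets carry a delete bitmap, so only they
        // take the delete-bitmap update / lock-initiator path.
        std::cout << "updating delete bitmap\n";
    } else {
        // Duplicate-key (and non-MOW unique-key) tablets skip the step,
        // which is what the patch above enforces.
        std::cout << "no delete bitmap for this keys type\n";
    }
}

int main() {
    modify_rowsets({KeysType::DUP_KEYS, false});   // previously hit the crash path
    modify_rowsets({KeysType::UNIQUE_KEYS, true}); // delete bitmap applies
    return 0;
}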


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
