This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch branch-0.15
in repository https://gitbox.apache.org/repos/asf/incubator-doris.git

commit b2b39f34e844b5a2ec95b4a9efc7afc28517fb70
Author: Mingyu Chen <[email protected]>
AuthorDate: Mon Oct 25 10:07:27 2021 +0800

    [Bug] Fix bug that truncate table may change the storage medium property 
(#6905)
---
 be/src/exec/olap_scan_node.cpp                     |  2 -
 be/src/exprs/bloomfilter_predicate.cpp             |  2 +-
 be/src/exprs/runtime_filter_rpc.cpp                |  4 +-
 be/src/olap/rowset/alpha_rowset.cpp                |  4 +-
 be/src/olap/tablet.cpp                             | 16 +++++---
 be/src/olap/tablet_manager.cpp                     |  5 ++-
 be/src/runtime/data_stream_sender.cpp              |  2 +-
 be/src/runtime/runtime_filter_mgr.cpp              |  2 +-
 be/src/service/internal_service.cpp                |  2 +-
 .../java/org/apache/doris/backup/BackupJob.java    |  8 ++--
 .../java/org/apache/doris/catalog/Catalog.java     |  3 +-
 .../java/org/apache/doris/catalog/OlapTable.java   | 47 ++++++++++++----------
 .../doris/broker/hdfs/FileSystemManager.java       |  4 +-
 13 files changed, 56 insertions(+), 45 deletions(-)

diff --git a/be/src/exec/olap_scan_node.cpp b/be/src/exec/olap_scan_node.cpp
index 3dc69024..b2d90e8 100644
--- a/be/src/exec/olap_scan_node.cpp
+++ b/be/src/exec/olap_scan_node.cpp
@@ -689,7 +689,6 @@ Status OlapScanNode::get_hints(const TPaloScanRange& 
scan_range, int block_row_c
         res = table->split_range(key_range->begin_scan_range, 
key_range->end_scan_range,
                                  block_row_count, &range);
         if (res != OLAP_SUCCESS) {
-            OLAP_LOG_WARNING("fail to show hints by split range. [res=%d]", 
res);
             return Status::InternalError("fail to show hints");
         }
         ranges.emplace_back(std::move(range));
@@ -700,7 +699,6 @@ Status OlapScanNode::get_hints(const TPaloScanRange& 
scan_range, int block_row_c
         std::vector<OlapTuple> range;
         auto res = table->split_range({}, {}, block_row_count, &range);
         if (res != OLAP_SUCCESS) {
-            OLAP_LOG_WARNING("fail to show hints by split range. [res=%d]", 
res);
             return Status::InternalError("fail to show hints");
         }
         ranges.emplace_back(std::move(range));
diff --git a/be/src/exprs/bloomfilter_predicate.cpp 
b/be/src/exprs/bloomfilter_predicate.cpp
index c3b199d..44906fd 100644
--- a/be/src/exprs/bloomfilter_predicate.cpp
+++ b/be/src/exprs/bloomfilter_predicate.cpp
@@ -73,7 +73,7 @@ BloomFilterPredicate::BloomFilterPredicate(const TExprNode& 
node)
           _scan_rows(0) {}
 
 BloomFilterPredicate::~BloomFilterPredicate() {
-    LOG(INFO) << "bloom filter rows:" << _filtered_rows << ",scan_rows:" << 
_scan_rows
+    VLOG_NOTICE << "bloom filter rows:" << _filtered_rows << ",scan_rows:" << 
_scan_rows
               << ",rate:" << (double)_filtered_rows / _scan_rows;
 }
 
diff --git a/be/src/exprs/runtime_filter_rpc.cpp 
b/be/src/exprs/runtime_filter_rpc.cpp
index 87bb579..f1e6e20 100644
--- a/be/src/exprs/runtime_filter_rpc.cpp
+++ b/be/src/exprs/runtime_filter_rpc.cpp
@@ -58,8 +58,8 @@ Status IRuntimeFilter::push_to_remote(RuntimeState* state, 
const TNetworkAddress
 
     Status serialize_status = serialize(&_rpc_context->request, &data, &len);
     if (serialize_status.ok()) {
-        LOG(INFO) << "Producer:" << _rpc_context->request.ShortDebugString() 
<< addr->hostname
-                  << ":" << addr->port;
+        VLOG_NOTICE << "Producer:" << _rpc_context->request.ShortDebugString() 
<< addr->hostname
+                    << ":" << addr->port;
         if (len > 0) {
             DCHECK(data != nullptr);
             _rpc_context->cntl.request_attachment().append(data, len);
diff --git a/be/src/olap/rowset/alpha_rowset.cpp 
b/be/src/olap/rowset/alpha_rowset.cpp
index c6bec67..7a98d82 100644
--- a/be/src/olap/rowset/alpha_rowset.cpp
+++ b/be/src/olap/rowset/alpha_rowset.cpp
@@ -166,7 +166,9 @@ OLAPStatus AlphaRowset::split_range(const RowCursor& 
start_key, const RowCursor&
                                     std::vector<OlapTuple>* ranges) {
     if (key_num > _schema->num_short_key_columns()) {
         // should not happen
-        LOG(WARNING) << "key num " << key_num << " should less than or equal 
to short key column number: "
+        // But since alpha rowset is deprecated and this will not fail the query,
+        // just use VLOG to avoid too many warning logs.
+        VLOG_NOTICE << "key num " << key_num << " should less than or equal to 
short key column number: "
                 << _schema->num_short_key_columns();
         return OLAP_ERR_INVALID_SCHEMA;
     }
diff --git a/be/src/olap/tablet.cpp b/be/src/olap/tablet.cpp
index b6dc448..7a77448 100644
--- a/be/src/olap/tablet.cpp
+++ b/be/src/olap/tablet.cpp
@@ -560,13 +560,19 @@ OLAPStatus Tablet::capture_consistent_versions(const 
Version& spec_version,
         std::vector<Version> missed_versions;
         calc_missed_versions_unlocked(spec_version.second, &missed_versions);
         if (missed_versions.empty()) {
-            LOG(WARNING) << "tablet:" << full_name()
-                         << ", version already has been merged. spec_version: 
" << spec_version;
+            // If version_path is null, this may be a compaction check, so
+            // avoid printing too many logs.
+            if (version_path != nullptr) {
+                LOG(WARNING) << "tablet:" << full_name()
+                    << ", version already has been merged. spec_version: " << 
spec_version;
+            }
             status = OLAP_ERR_VERSION_ALREADY_MERGED;
         } else {
-            LOG(WARNING) << "status:" << status << ", tablet:" << full_name()
-                         << ", missed version for version:" << spec_version;
-            _print_missed_versions(missed_versions);
+            if (version_path != nullptr) {
+                LOG(WARNING) << "status:" << status << ", tablet:" << 
full_name()
+                    << ", missed version for version:" << spec_version;
+                _print_missed_versions(missed_versions);
+            }
         }
     }
     return status;
diff --git a/be/src/olap/tablet_manager.cpp b/be/src/olap/tablet_manager.cpp
index cabd32d..9054cdb 100644
--- a/be/src/olap/tablet_manager.cpp
+++ b/be/src/olap/tablet_manager.cpp
@@ -45,6 +45,7 @@
 #include "olap/tablet_meta.h"
 #include "olap/tablet_meta_manager.h"
 #include "olap/utils.h"
+#include "service/backend_options.h"
 #include "util/doris_metrics.h"
 #include "util/file_utils.h"
 #include "util/histogram.h"
@@ -553,7 +554,7 @@ TabletSharedPtr 
TabletManager::_get_tablet_unlocked(TTabletId tablet_id, SchemaH
 
     if (tablet == nullptr) {
         if (err != nullptr) {
-            *err = "tablet does not exist";
+            *err = "tablet does not exist. " + BackendOptions::get_localhost();
         }
         return nullptr;
     }
@@ -561,7 +562,7 @@ TabletSharedPtr 
TabletManager::_get_tablet_unlocked(TTabletId tablet_id, SchemaH
     if (!tablet->is_used()) {
         LOG(WARNING) << "tablet cannot be used. tablet=" << tablet_id;
         if (err != nullptr) {
-            *err = "tablet cannot be used";
+            *err = "tablet cannot be used. " + BackendOptions::get_localhost();
         }
         return nullptr;
     }
diff --git a/be/src/runtime/data_stream_sender.cpp 
b/be/src/runtime/data_stream_sender.cpp
index 6325c56..ceda653 100644
--- a/be/src/runtime/data_stream_sender.cpp
+++ b/be/src/runtime/data_stream_sender.cpp
@@ -71,7 +71,7 @@ DataStreamSender::Channel::Channel(DataStreamSender* parent, 
const RowDescriptor
     std::string localhost = BackendOptions::get_localhost();
     _is_local = _brpc_dest_addr.hostname == localhost && _brpc_dest_addr.port 
== config::brpc_port;
     if (_is_local) {
-        LOG(INFO) << "will use local exechange, dest_node_id:" << 
_dest_node_id;
+        VLOG_NOTICE << "will use local exechange, dest_node_id:" << 
_dest_node_id;
     }
 }
 
diff --git a/be/src/runtime/runtime_filter_mgr.cpp 
b/be/src/runtime/runtime_filter_mgr.cpp
index 58cec23..64f7501 100644
--- a/be/src/runtime/runtime_filter_mgr.cpp
+++ b/be/src/runtime/runtime_filter_mgr.cpp
@@ -231,7 +231,7 @@ Status RuntimeFilterMergeControllerEntity::merge(const 
PMergeFilterRequest* requ
 
             PBackendService_Stub* stub = 
ExecEnv::GetInstance()->brpc_stub_cache()->get_stub(
                     targets[i].target_fragment_instance_addr);
-            LOG(INFO) << "send filter " << rpc_contexts[i]->request.filter_id()
+            VLOG_NOTICE << "send filter " << 
rpc_contexts[i]->request.filter_id()
                       << " to:" << 
targets[i].target_fragment_instance_addr.hostname << ":"
                       << targets[i].target_fragment_instance_addr.port
                       << rpc_contexts[i]->request.ShortDebugString();
diff --git a/be/src/service/internal_service.cpp 
b/be/src/service/internal_service.cpp
index 621c692..35e94a9 100644
--- a/be/src/service/internal_service.cpp
+++ b/be/src/service/internal_service.cpp
@@ -308,7 +308,7 @@ void 
PInternalServiceImpl<T>::apply_filter(::google::protobuf::RpcController* co
     auto attachment = 
static_cast<brpc::Controller*>(controller)->request_attachment();
     UniqueId unique_id(request->query_id());
     // TODO: avoid copy attachment copy
-    LOG(INFO) << "rpc apply_filter recv";
+    VLOG_NOTICE << "rpc apply_filter recv";
     Status st = _exec_env->fragment_mgr()->apply_filter(request, 
attachment.to_string().data());
     if (!st.ok()) {
         LOG(WARNING) << "apply filter meet error" << st.to_string();
diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java 
b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java
index d75e1c7..55f676e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java
@@ -49,9 +49,6 @@ import org.apache.doris.thrift.TFinishTaskRequest;
 import org.apache.doris.thrift.TStatusCode;
 import org.apache.doris.thrift.TTaskType;
 
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Predicates;
@@ -61,6 +58,9 @@ import com.google.common.collect.Collections2;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.File;
@@ -505,7 +505,7 @@ public class BackupJob extends AbstractJob {
                     // only copy visible indexes
                     List<String> reservedPartitions = 
tableRef.getPartitionNames() == null ? null
                             : tableRef.getPartitionNames().getPartitionNames();
-                    OlapTable copiedTbl = 
olapTable.selectiveCopy(reservedPartitions, true, IndexExtState.VISIBLE);
+                    OlapTable copiedTbl = 
olapTable.selectiveCopy(reservedPartitions, IndexExtState.VISIBLE, true);
                     if (copiedTbl == null) {
                         status = new Status(ErrCode.COMMON_ERROR, "failed to 
copy table: " + tblName);
                         return;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java 
b/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java
index f82d09b..1c10990 100755
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java
@@ -777,6 +777,7 @@ public class Catalog {
         // 1. check and create dirs and files
         File meta = new File(metaDir);
         if (!meta.exists()) {
+            LOG.warn("Doris' meta dir {} does not exist. You need to create it 
before starting FE", meta.getAbsolutePath());
             throw new Exception(meta.getAbsolutePath() + " does not exist, 
will exit");
         }
 
@@ -6676,7 +6677,7 @@ public class Catalog {
                     partitionsDistributionInfo.put(partition.getId(), 
partition.getDistributionInfo());
                 }
             }
-            copiedTbl = olapTable.selectiveCopy(origPartitions.keySet(), true, 
IndexExtState.VISIBLE);
+            copiedTbl = olapTable.selectiveCopy(origPartitions.keySet(), 
IndexExtState.VISIBLE, false);
         } finally {
             olapTable.readUnlock();
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java 
b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
index 1af2097..5fde029 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
@@ -1278,34 +1278,37 @@ public class OlapTable extends Table {
         return table instanceof OlapTable;
     }
 
-    public OlapTable selectiveCopy(Collection<String> reservedPartitions, 
boolean resetState, IndexExtState extState) {
+    public OlapTable selectiveCopy(Collection<String> reservedPartitions, 
IndexExtState extState, boolean isForBackup) {
         OlapTable copied = new OlapTable();
         if (!DeepCopy.copy(this, copied, OlapTable.class, 
FeConstants.meta_version)) {
             LOG.warn("failed to copy olap table: " + getName());
             return null;
         }
-        
-        if (resetState) {
-            // remove shadow index from copied table
-            List<MaterializedIndex> shadowIndex = 
copied.getPartitions().stream().findFirst().get().getMaterializedIndices(IndexExtState.SHADOW);
+
+        // remove shadow index from copied table
+        List<MaterializedIndex> shadowIndex = 
copied.getPartitions().stream().findFirst().get().getMaterializedIndices(IndexExtState.SHADOW);
+        for (MaterializedIndex deleteIndex : shadowIndex) {
+            LOG.debug("copied table delete shadow index : {}", 
deleteIndex.getId());
+            
copied.deleteIndexInfo(copied.getIndexNameById(deleteIndex.getId()));
+        }
+        copied.setState(OlapTableState.NORMAL);
+        for (Partition partition : copied.getPartitions()) {
+            // remove shadow index from partition
             for (MaterializedIndex deleteIndex : shadowIndex) {
-                LOG.debug("copied table delete shadow index : {}", 
deleteIndex.getId());
-                
copied.deleteIndexInfo(copied.getIndexNameById(deleteIndex.getId()));
+                partition.deleteRollupIndex(deleteIndex.getId());
             }
-            copied.setState(OlapTableState.NORMAL);
-            for (Partition partition : copied.getPartitions()) {
-                // remove shadow index from partition
-                for (MaterializedIndex deleteIndex : shadowIndex) {
-                    partition.deleteRollupIndex(deleteIndex.getId());
-                }
-                partition.setState(PartitionState.NORMAL);
+            partition.setState(PartitionState.NORMAL);
+            if (isForBackup) {
+                // Set storage medium to HDD for backup jobs, because we want
+                // the backed-up table to be restorable to another Doris
+                // cluster without SSD disks. But for other operations such as
+                // truncate table, keep the original storage medium.
                 copied.getPartitionInfo().setDataProperty(partition.getId(), 
new DataProperty(TStorageMedium.HDD));
-                for (MaterializedIndex idx : 
partition.getMaterializedIndices(extState)) {
-                    idx.setState(IndexState.NORMAL);
-                    for (Tablet tablet : idx.getTablets()) {
-                        for (Replica replica : tablet.getReplicas()) {
-                            replica.setState(ReplicaState.NORMAL);
-                        }
+            }
+            for (MaterializedIndex idx : 
partition.getMaterializedIndices(extState)) {
+                idx.setState(IndexState.NORMAL);
+                for (Tablet tablet : idx.getTablets()) {
+                    for (Replica replica : tablet.getReplicas()) {
+                        replica.setState(ReplicaState.NORMAL);
                     }
                 }
             }
@@ -1315,10 +1318,10 @@ public class OlapTable extends Table {
             // reserve all
             return copied;
         }
-        
+
         Set<String> partNames = Sets.newTreeSet(String.CASE_INSENSITIVE_ORDER);
         partNames.addAll(copied.getPartitionNames());
-        
+
         for (String partName : partNames) {
             if (!reservedPartitions.contains(partName)) {
                 copied.dropPartitionAndReserveTablet(partName);
diff --git 
a/fs_brokers/apache_hdfs_broker/src/main/java/org/apache/doris/broker/hdfs/FileSystemManager.java
 
b/fs_brokers/apache_hdfs_broker/src/main/java/org/apache/doris/broker/hdfs/FileSystemManager.java
index 562e1ac..6cedd70 100644
--- 
a/fs_brokers/apache_hdfs_broker/src/main/java/org/apache/doris/broker/hdfs/FileSystemManager.java
+++ 
b/fs_brokers/apache_hdfs_broker/src/main/java/org/apache/doris/broker/hdfs/FileSystemManager.java
@@ -241,7 +241,7 @@ public class FileSystemManager {
                 return null;
             }
             if (fileSystem.getDFSFileSystem() == null) {
-                logger.info("could not find file system for path " + path + " 
create a new one");
+                logger.info("create file system for new path: " + path);
                 UserGroupInformation ugi = null;
 
                 // create a new filesystem
@@ -402,7 +402,7 @@ public class FileSystemManager {
                 return null;
             }
             if (fileSystem.getDFSFileSystem() == null) {
-                logger.info("could not find file system for path " + path + " 
create a new one");
+                logger.info("create file system for new path " + path);
                 // create a new filesystem
                 Configuration conf = new Configuration();
                 conf.set(FS_S3A_ACCESS_KEY, accessKey);

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to