This is an automated email from the ASF dual-hosted git repository.
morningman pushed a commit to branch branch-1.2-lts
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/branch-1.2-lts by this push:
new 0b63afa4fb [fix](hdfs)(catalog) fix BE crash when hdfs-site.xml not
exist in be/conf and fix compute node logic (#17357)
0b63afa4fb is described below
commit 0b63afa4fb5f9bfa87901d326ea9279891b2e559
Author: Mingyu Chen <[email protected]>
AuthorDate: Thu Mar 2 23:47:33 2023 +0800
[fix](hdfs)(catalog) fix BE crash when hdfs-site.xml not exist in be/conf
and fix compute node logic (#17357)
cherry-pick #17244
---
be/src/common/status.cpp | 1 +
be/src/common/status.h | 2 ++
be/src/io/hdfs_builder.cpp | 41 +++++++++++++++-------
be/src/io/hdfs_builder.h | 22 ++++++++----
be/src/io/hdfs_file_reader.cpp | 6 ++--
be/src/io/hdfs_writer.cpp | 12 +++----
be/src/io/hdfs_writer.h | 1 -
be/src/util/hdfs_storage_backend.cpp | 15 +++++---
be/src/util/hdfs_storage_backend.h | 3 +-
be/src/util/hdfs_util.cpp | 9 +----
be/test/common/config_test.cpp | 21 +++++++----
be/test/common/status_test.cpp | 6 ++--
bin/start_be.sh | 4 ++-
.../main/java/org/apache/doris/common/Config.java | 17 ++++++---
.../doris/planner/external/BackendPolicy.java | 4 +--
.../planner/external/ExternalFileScanNode.java | 19 ++++++++++
.../org/apache/doris/system/BeSelectionPolicy.java | 20 +++++------
.../apache/doris/system/SystemInfoServiceTest.java | 12 +++----
18 files changed, 135 insertions(+), 80 deletions(-)
diff --git a/be/src/common/status.cpp b/be/src/common/status.cpp
index 9dc8b27a4b..f066fe23c3 100644
--- a/be/src/common/status.cpp
+++ b/be/src/common/status.cpp
@@ -191,6 +191,7 @@ std::string Status::to_string() const {
if (ok()) {
return result;
}
+ result.append(fmt::format("({})", _be_ip));
if (precise_code() != 1) {
result.append(fmt::format("(error {})", precise_code()));
}
diff --git a/be/src/common/status.h b/be/src/common/status.h
index 46c3a66e89..3a8506283b 100644
--- a/be/src/common/status.h
+++ b/be/src/common/status.h
@@ -14,6 +14,7 @@
#include "common/compiler_util.h"
#include "common/logging.h"
#include "gen_cpp/Status_types.h" // for TStatus
+#include "service/backend_options.h"
namespace doris {
@@ -510,6 +511,7 @@ private:
TStatusCode::type _code;
int16_t _precise_code;
std::string _err_msg;
+ std::string _be_ip = BackendOptions::get_localhost();
};
// Override the << operator, it is used during LOG(INFO) << "xxxx" << status;
diff --git a/be/src/io/hdfs_builder.cpp b/be/src/io/hdfs_builder.cpp
index bb58b3c11e..b08b973860 100644
--- a/be/src/io/hdfs_builder.cpp
+++ b/be/src/io/hdfs_builder.cpp
@@ -28,6 +28,16 @@
#include "util/url_coding.h"
namespace doris {
+Status HDFSCommonBuilder::init_hdfs_builder() {
+ hdfs_builder = hdfsNewBuilder();
+ if (hdfs_builder == nullptr) {
+ LOG(INFO) << "failed to init HDFSCommonBuilder, please check
be/conf/hdfs-site.xml";
+ return Status::InternalError(
+ "failed to init HDFSCommonBuilder, please check
be/conf/hdfs-site.xml");
+ }
+ return Status::OK();
+}
+
Status HDFSCommonBuilder::run_kinit() {
if (hdfs_kerberos_principal.empty() || hdfs_kerberos_keytab.empty()) {
return Status::InvalidArgument("Invalid hdfs_kerberos_principal or
hdfs_kerberos_keytab");
@@ -79,36 +89,41 @@ THdfsParams parse_properties(const std::map<std::string,
std::string>& propertie
return hdfsParams;
}
-HDFSCommonBuilder createHDFSBuilder(const THdfsParams& hdfsParams) {
- HDFSCommonBuilder builder;
- hdfsBuilderSetNameNode(builder.get(), hdfsParams.fs_name.c_str());
+Status createHDFSBuilder(const THdfsParams& hdfsParams, HDFSCommonBuilder*
builder) {
+ RETURN_IF_ERROR(builder->init_hdfs_builder());
+ hdfsBuilderSetNameNode(builder->get(), hdfsParams.fs_name.c_str());
// set hdfs user
if (hdfsParams.__isset.user) {
- hdfsBuilderSetUserName(builder.get(), hdfsParams.user.c_str());
+ hdfsBuilderSetUserName(builder->get(), hdfsParams.user.c_str());
}
// set kerberos conf
if (hdfsParams.__isset.hdfs_kerberos_principal) {
- builder.need_kinit = true;
- builder.hdfs_kerberos_principal = hdfsParams.hdfs_kerberos_principal;
- hdfsBuilderSetPrincipal(builder.get(),
hdfsParams.hdfs_kerberos_principal.c_str());
+ builder->need_kinit = true;
+ builder->hdfs_kerberos_principal = hdfsParams.hdfs_kerberos_principal;
+ hdfsBuilderSetPrincipal(builder->get(),
hdfsParams.hdfs_kerberos_principal.c_str());
}
if (hdfsParams.__isset.hdfs_kerberos_keytab) {
- builder.need_kinit = true;
- builder.hdfs_kerberos_keytab = hdfsParams.hdfs_kerberos_keytab;
+ builder->need_kinit = true;
+ builder->hdfs_kerberos_keytab = hdfsParams.hdfs_kerberos_keytab;
}
// set other conf
if (hdfsParams.__isset.hdfs_conf) {
for (const THdfsConf& conf : hdfsParams.hdfs_conf) {
- hdfsBuilderConfSetStr(builder.get(), conf.key.c_str(),
conf.value.c_str());
+ hdfsBuilderConfSetStr(builder->get(), conf.key.c_str(),
conf.value.c_str());
}
}
- return builder;
+ if (builder->is_need_kinit()) {
+ RETURN_IF_ERROR(builder->run_kinit());
+ }
+
+ return Status::OK();
}
-HDFSCommonBuilder createHDFSBuilder(const std::map<std::string, std::string>&
properties) {
+Status createHDFSBuilder(const std::map<std::string, std::string>& properties,
+ HDFSCommonBuilder* builder) {
THdfsParams hdfsParams = parse_properties(properties);
- return createHDFSBuilder(hdfsParams);
+ return createHDFSBuilder(hdfsParams, builder);
}
} // namespace doris
diff --git a/be/src/io/hdfs_builder.h b/be/src/io/hdfs_builder.h
index a8feb0ffa1..eb63fab1b5 100644
--- a/be/src/io/hdfs_builder.h
+++ b/be/src/io/hdfs_builder.h
@@ -31,13 +31,20 @@ const std::string KERBEROS_KEYTAB =
"hadoop.kerberos.keytab";
const std::string TICKET_CACHE_PATH = "/tmp/krb5cc_doris_";
class HDFSCommonBuilder {
- friend HDFSCommonBuilder createHDFSBuilder(const THdfsParams& hdfsParams);
- friend HDFSCommonBuilder createHDFSBuilder(
- const std::map<std::string, std::string>& properties);
+ friend Status createHDFSBuilder(const THdfsParams& hdfsParams,
HDFSCommonBuilder* builder);
+ friend Status createHDFSBuilder(const std::map<std::string, std::string>&
properties,
+ HDFSCommonBuilder* builder);
public:
- HDFSCommonBuilder() : hdfs_builder(hdfsNewBuilder()) {};
- ~HDFSCommonBuilder() { hdfsFreeBuilder(hdfs_builder); };
+ HDFSCommonBuilder() = default;
+ ~HDFSCommonBuilder() {
+ if (hdfs_builder != nullptr) {
+ hdfsFreeBuilder(hdfs_builder);
+ }
+ }
+
+ // Must call this to init hdfs_builder first.
+ Status init_hdfs_builder();
hdfsBuilder* get() { return hdfs_builder; };
bool is_need_kinit() { return need_kinit; };
@@ -52,7 +59,8 @@ private:
THdfsParams parse_properties(const std::map<std::string, std::string>&
properties);
-HDFSCommonBuilder createHDFSBuilder(const THdfsParams& hdfsParams);
-HDFSCommonBuilder createHDFSBuilder(const std::map<std::string, std::string>&
properties);
+Status createHDFSBuilder(const THdfsParams& hdfsParams, HDFSCommonBuilder*
builder);
+Status createHDFSBuilder(const std::map<std::string, std::string>& properties,
+ HDFSCommonBuilder* builder);
} // namespace doris
diff --git a/be/src/io/hdfs_file_reader.cpp b/be/src/io/hdfs_file_reader.cpp
index 78a21ac477..1bdd96cd2a 100644
--- a/be/src/io/hdfs_file_reader.cpp
+++ b/be/src/io/hdfs_file_reader.cpp
@@ -203,10 +203,8 @@ Status HdfsFileReader::tell(int64_t* position) {
int HdfsFsCache::MAX_CACHE_HANDLE = 64;
Status HdfsFsCache::_create_fs(THdfsParams& hdfs_params, hdfsFS* fs) {
- HDFSCommonBuilder builder = createHDFSBuilder(hdfs_params);
- if (builder.is_need_kinit()) {
- RETURN_IF_ERROR(builder.run_kinit());
- }
+ HDFSCommonBuilder builder;
+ RETURN_IF_ERROR(createHDFSBuilder(hdfs_params, &builder));
hdfsFS hdfs_fs = hdfsBuilderConnect(builder.get());
if (hdfs_fs == nullptr) {
return Status::InternalError("connect to hdfs failed. error: {}",
hdfsGetLastError());
diff --git a/be/src/io/hdfs_writer.cpp b/be/src/io/hdfs_writer.cpp
index 51c9fed7d7..7b44d3afbe 100644
--- a/be/src/io/hdfs_writer.cpp
+++ b/be/src/io/hdfs_writer.cpp
@@ -25,10 +25,7 @@ namespace doris {
HDFSWriter::HDFSWriter(const std::map<std::string, std::string>& properties,
const std::string& path)
- : _properties(properties),
- _path(path),
- _hdfs_fs(nullptr),
- _builder(createHDFSBuilder(_properties)) {
+ : _properties(properties), _path(path), _hdfs_fs(nullptr) {
_parse_properties(_properties);
}
@@ -136,10 +133,9 @@ Status HDFSWriter::close() {
}
Status HDFSWriter::_connect() {
- if (_builder.is_need_kinit()) {
- RETURN_IF_ERROR(_builder.run_kinit());
- }
- _hdfs_fs = hdfsBuilderConnect(_builder.get());
+ HDFSCommonBuilder builder;
+ RETURN_IF_ERROR(createHDFSBuilder(_properties, &builder));
+ _hdfs_fs = hdfsBuilderConnect(builder.get());
if (_hdfs_fs == nullptr) {
return Status::InternalError("connect to hdfs failed. namenode
address:{}, error {}",
_namenode, hdfsGetLastError());
diff --git a/be/src/io/hdfs_writer.h b/be/src/io/hdfs_writer.h
index ffb38aad67..f5ef32a238 100644
--- a/be/src/io/hdfs_writer.h
+++ b/be/src/io/hdfs_writer.h
@@ -48,7 +48,6 @@ private:
hdfsFS _hdfs_fs = nullptr;
hdfsFile _hdfs_file = nullptr;
bool _closed = false;
- HDFSCommonBuilder _builder;
};
} // namespace doris
diff --git a/be/src/util/hdfs_storage_backend.cpp
b/be/src/util/hdfs_storage_backend.cpp
index 71df59c086..6f3baf2d96 100644
--- a/be/src/util/hdfs_storage_backend.cpp
+++ b/be/src/util/hdfs_storage_backend.cpp
@@ -35,9 +35,16 @@ namespace doris {
static const std::string hdfs_file_prefix = "hdfs://";
HDFSStorageBackend::HDFSStorageBackend(const std::map<std::string,
std::string>& prop)
- : _properties(prop), _builder(createHDFSBuilder(_properties)) {
- _hdfs_fs = HDFSHandle::instance().create_hdfs_fs(_builder);
- DCHECK(_hdfs_fs) << "init hdfs client error.";
+ : _properties(prop) {
+ HDFSCommonBuilder builder;
+ Status st = createHDFSBuilder(_properties, &builder);
+ if (st.ok()) {
+ _hdfs_fs = HDFSHandle::instance().create_hdfs_fs(builder);
+ DCHECK(_hdfs_fs) << "init hdfs client error.";
+ }
+ // if createHDFSBuilder failed, _hdfs_fs will be null.
+ // and CHECK_HDFS_CLIENT will return error.
+ // TODO: refactor StorageBackend, unify into File system
}
HDFSStorageBackend::~HDFSStorageBackend() {
@@ -306,4 +313,4 @@ Status HDFSStorageBackend::rmdir(const std::string& remote)
{
return rm(remote);
}
-} // end namespace doris
\ No newline at end of file
+} // end namespace doris
diff --git a/be/src/util/hdfs_storage_backend.h
b/be/src/util/hdfs_storage_backend.h
index a80fad486f..acbf18d2d0 100644
--- a/be/src/util/hdfs_storage_backend.h
+++ b/be/src/util/hdfs_storage_backend.h
@@ -56,8 +56,7 @@ private:
private:
std::map<std::string, std::string> _properties;
- HDFSCommonBuilder _builder;
hdfsFS _hdfs_fs = nullptr;
};
-} // end namespace doris
\ No newline at end of file
+} // end namespace doris
diff --git a/be/src/util/hdfs_util.cpp b/be/src/util/hdfs_util.cpp
index 5ffa09b966..b58fc75e46 100644
--- a/be/src/util/hdfs_util.cpp
+++ b/be/src/util/hdfs_util.cpp
@@ -30,13 +30,6 @@ HDFSHandle& HDFSHandle::instance() {
}
hdfsFS HDFSHandle::create_hdfs_fs(HDFSCommonBuilder& hdfs_builder) {
- if (hdfs_builder.is_need_kinit()) {
- Status status = hdfs_builder.run_kinit();
- if (!status.ok()) {
- LOG(WARNING) << status.get_error_msg();
- return nullptr;
- }
- }
hdfsFS hdfs_fs = hdfsBuilderConnect(hdfs_builder.get());
if (hdfs_fs == nullptr) {
LOG(WARNING) << "connect to hdfs failed."
@@ -46,4 +39,4 @@ hdfsFS HDFSHandle::create_hdfs_fs(HDFSCommonBuilder&
hdfs_builder) {
return hdfs_fs;
}
-} // namespace doris
\ No newline at end of file
+} // namespace doris
diff --git a/be/test/common/config_test.cpp b/be/test/common/config_test.cpp
index e835d8313b..72c8b26252 100644
--- a/be/test/common/config_test.cpp
+++ b/be/test/common/config_test.cpp
@@ -99,36 +99,45 @@ TEST_F(ConfigTest, UpdateConfigs) {
// not exist
Status s = config::set_config("cfg_not_exist", "123");
EXPECT_FALSE(s.ok());
- EXPECT_EQ(s.to_string(), "Not found: 'cfg_not_exist' is not found");
+ EXPECT_TRUE(s.to_string().find("Not found") != std::string::npos);
+ EXPECT_TRUE(s.to_string().find("'cfg_not_exist' is not found") !=
std::string::npos);
// immutable
EXPECT_TRUE(cfg_bool_immutable);
s = config::set_config("cfg_bool_immutable", "false");
EXPECT_FALSE(s.ok());
- EXPECT_EQ(s.to_string(), "Not supported: 'cfg_bool_immutable' is not
support to modify");
+ EXPECT_TRUE(s.to_string().find("Not supported") != std::string::npos);
+ EXPECT_TRUE(s.to_string().find("'cfg_bool_immutable' is not support to
modify") !=
+ std::string::npos);
EXPECT_TRUE(cfg_bool_immutable);
// convert error
s = config::set_config("cfg_bool", "falseeee");
EXPECT_FALSE(s.ok());
- EXPECT_EQ(s.to_string(), "Invalid argument: convert 'falseeee' as bool
failed");
+ EXPECT_TRUE(s.to_string().find("Invalid argument") != std::string::npos);
+ EXPECT_TRUE(s.to_string().find("convert 'falseeee' as bool failed") !=
std::string::npos);
EXPECT_TRUE(cfg_bool);
s = config::set_config("cfg_double", "");
EXPECT_FALSE(s.ok());
- EXPECT_EQ(s.to_string(), "Invalid argument: convert '' as double failed");
+ EXPECT_TRUE(s.to_string().find("Invalid argument") != std::string::npos);
+ EXPECT_TRUE(s.to_string().find("convert '' as double failed") !=
std::string::npos);
EXPECT_EQ(cfg_double, 654.321);
// convert error
s = config::set_config("cfg_int32_t", "4294967296124");
EXPECT_FALSE(s.ok());
- EXPECT_EQ(s.to_string(), "Invalid argument: convert '4294967296124' as
int32_t failed");
+ EXPECT_TRUE(s.to_string().find("Invalid argument") != std::string::npos);
+ EXPECT_TRUE(s.to_string().find("convert '4294967296124' as int32_t
failed") !=
+ std::string::npos);
EXPECT_EQ(cfg_int32_t, 65536124);
// not support
s = config::set_config("cfg_std_string", "test");
EXPECT_FALSE(s.ok());
- EXPECT_EQ(s.to_string(), "Not supported: 'cfg_std_string' is not support
to modify");
+ EXPECT_TRUE(s.to_string().find("Not supported") != std::string::npos);
+ EXPECT_TRUE(s.to_string().find("'cfg_std_string' is not support to
modify") !=
+ std::string::npos);
EXPECT_EQ(cfg_std_string, "doris_config_test_string");
}
diff --git a/be/test/common/status_test.cpp b/be/test/common/status_test.cpp
index dbeb4ad9b3..502826d4f0 100644
--- a/be/test/common/status_test.cpp
+++ b/be/test/common/status_test.cpp
@@ -51,7 +51,8 @@ TEST_F(StatusTest, Error) {
Status st = Status::InternalError("123");
EXPECT_FALSE(st.ok());
EXPECT_EQ("123", st.get_error_msg());
- EXPECT_EQ("Internal error: 123", st.to_string());
+ EXPECT_TRUE(st.to_string().find("Internal error") != std::string::npos);
+ EXPECT_TRUE(st.to_string().find("123") != std::string::npos);
// copy
{
Status other = st;
@@ -67,7 +68,8 @@ TEST_F(StatusTest, Error) {
Status other = std::move(st);
EXPECT_FALSE(other.ok());
EXPECT_EQ("456", other.get_error_msg());
- EXPECT_EQ("Internal error: 456", other.to_string());
+ EXPECT_TRUE(other.to_string().find("Internal error") !=
std::string::npos);
+ EXPECT_TRUE(other.to_string().find("456") != std::string::npos);
}
}
diff --git a/bin/start_be.sh b/bin/start_be.sh
index f34973af3c..d383c25163 100755
--- a/bin/start_be.sh
+++ b/bin/start_be.sh
@@ -231,7 +231,9 @@ set_tcmalloc_heap_limit() {
# set_tcmalloc_heap_limit || exit 1
## set hdfs conf
-export LIBHDFS3_CONF="${DORIS_HOME}/conf/hdfs-site.xml"
+if [[ -f "${DORIS_HOME}/conf/hdfs-site.xml" ]]; then
+ export LIBHDFS3_CONF="${DORIS_HOME}/conf/hdfs-site.xml"
+fi
# see https://github.com/jemalloc/jemalloc/issues/2366
export
JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:30000,dirty_decay_ms:30000,oversize_threshold:0,lg_tcache_max:16,prof:true,prof_prefix:jeprof.out"
diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/Config.java
b/fe/fe-common/src/main/java/org/apache/doris/common/Config.java
index fb0764e0ff..ae72c82e81 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/Config.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/Config.java
@@ -1812,12 +1812,21 @@ public class Config extends ConfigBase {
public static long scheduler_mtmv_task_expired = 24 * 60 * 60L; // 1day
/**
- * The candidate of the backend node for federation query such as hive
table and es table query.
- * If the backend of computation role is less than this value, it will
acquire some mix backend.
- * If the computation backend is enough, federation query will only assign
to computation backend.
+ * If set to true, query on external table will prefer to assign to
compute node.
+ * And the max number of compute node is controlled by
min_backend_num_for_external_table.
+ * If set to false, query on external table will assign to any node.
*/
@ConfField(mutable = true, masterOnly = false)
- public static int backend_num_for_federation = 3;
+ public static boolean prefer_compute_node_for_external_table = false;
+
+ /**
+ * Only take effect when prefer_compute_node_for_external_table is true.
+ * If the compute node number is less than this value, query on external
table will try to get some mix node
+ * to assign, to let the total number of node reach this value.
+ * If the compute node number is larger than this value, query on external
table will assign to compute node only.
+ */
+ @ConfField(mutable = true, masterOnly = false)
+ public static int min_backend_num_for_external_table = 3;
/**
* Max query profile num.
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/planner/external/BackendPolicy.java
b/fe/fe-core/src/main/java/org/apache/doris/planner/external/BackendPolicy.java
index cdc436a83a..3291b31741 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/planner/external/BackendPolicy.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/planner/external/BackendPolicy.java
@@ -57,8 +57,8 @@ public class BackendPolicy {
.needQueryAvailable()
.needLoadAvailable()
.addTags(tags)
- .preferComputeNode()
- .assignCandidateNum(Config.backend_num_for_federation)
+
.preferComputeNode(Config.prefer_compute_node_for_external_table)
+ .assignExpectBeNum(Config.min_backend_num_for_external_table)
.build();
backends.addAll(policy.getCandidateBackends(Env.getCurrentSystemInfo().getIdToBackend().values()));
if (backends.isEmpty()) {
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/planner/external/ExternalFileScanNode.java
b/fe/fe-core/src/main/java/org/apache/doris/planner/external/ExternalFileScanNode.java
index a5a917bb17..361f8e9e91 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/planner/external/ExternalFileScanNode.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/planner/external/ExternalFileScanNode.java
@@ -55,6 +55,7 @@ import
org.apache.doris.tablefunction.ExternalFileTableValuedFunction;
import org.apache.doris.thrift.TBrokerFileStatus;
import org.apache.doris.thrift.TExplainLevel;
import org.apache.doris.thrift.TExpr;
+import org.apache.doris.thrift.TFileRangeDesc;
import org.apache.doris.thrift.TFileScanNode;
import org.apache.doris.thrift.TFileScanRangeParams;
import org.apache.doris.thrift.TFileScanSlotInfo;
@@ -610,6 +611,24 @@ public class ExternalFileScanNode extends ExternalScanNode
{
output.append(prefix).append("partition=").append(readPartitionNum).append("/").append(totalPartitionNum)
.append("\n");
+ if (detailLevel == TExplainLevel.VERBOSE) {
+ output.append(prefix).append("backends:").append("\n");
+ for (TScanRangeLocations locations : scanRangeLocations) {
+ output.append(prefix).append("
").append(locations.getLocations().get(0).backend_id).append("\n");
+ List<TFileRangeDesc> files =
locations.getScanRange().getExtScanRange().getFileScanRange().getRanges();
+ for (int i = 0; i < 3; i++) {
+ if (i >= files.size()) {
+ break;
+ }
+ TFileRangeDesc file = files.get(i);
+ output.append(prefix).append(" ").append(file.getPath())
+ .append(" start: ").append(file.getStartOffset())
+ .append(" length: ").append(file.getFileSize())
+ .append("\n");
+ }
+ }
+ }
+
output.append(prefix);
if (cardinality > 0) {
output.append(String.format("cardinality=%s, ", cardinality));
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/system/BeSelectionPolicy.java
b/fe/fe-core/src/main/java/org/apache/doris/system/BeSelectionPolicy.java
index 6c95f6ecfb..0fac609224 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/system/BeSelectionPolicy.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/system/BeSelectionPolicy.java
@@ -47,7 +47,7 @@ public class BeSelectionPolicy {
public boolean allowOnSameHost = false;
public boolean preferComputeNode = false;
- public int candidateNum = Integer.MAX_VALUE;
+ public int expectBeNum = 0;
private BeSelectionPolicy() {
@@ -100,13 +100,13 @@ public class BeSelectionPolicy {
return this;
}
- public Builder preferComputeNode() {
- policy.preferComputeNode = true;
+ public Builder preferComputeNode(boolean prefer) {
+ policy.preferComputeNode = prefer;
return this;
}
- public Builder assignCandidateNum(int candidateNum) {
- policy.candidateNum = candidateNum;
+ public Builder assignExpectBeNum(int expectBeNum) {
+ policy.expectBeNum = expectBeNum;
return this;
}
@@ -141,25 +141,21 @@ public class BeSelectionPolicy {
public List<Backend> getCandidateBackends(ImmutableCollection<Backend>
backends) {
List<Backend> filterBackends =
backends.stream().filter(this::isMatch).collect(Collectors.toList());
- Collections.shuffle(filterBackends);
List<Backend> candidates = new ArrayList<>();
if (preferComputeNode) {
int num = 0;
// pick compute node first
for (Backend backend : filterBackends) {
if (backend.isComputeNode()) {
- if (num >= candidateNum) {
- break;
- }
candidates.add(backend);
num++;
}
}
// fill with some mix node.
- if (num < candidateNum) {
+ if (num < expectBeNum) {
for (Backend backend : filterBackends) {
if (backend.isMixNode()) {
- if (num >= candidateNum) {
+ if (num >= expectBeNum) {
break;
}
candidates.add(backend);
@@ -170,7 +166,7 @@ public class BeSelectionPolicy {
} else {
candidates.addAll(filterBackends);
}
-
+ Collections.shuffle(candidates);
return candidates;
}
diff --git
a/fe/fe-core/src/test/java/org/apache/doris/system/SystemInfoServiceTest.java
b/fe/fe-core/src/test/java/org/apache/doris/system/SystemInfoServiceTest.java
index 546449b79c..ec9ce6301d 100644
---
a/fe/fe-core/src/test/java/org/apache/doris/system/SystemInfoServiceTest.java
+++
b/fe/fe-core/src/test/java/org/apache/doris/system/SystemInfoServiceTest.java
@@ -235,15 +235,15 @@ public class SystemInfoServiceTest {
Assert.assertEquals(0, infoService.selectBackendIdsByPolicy(policy01,
1).size());
BeSelectionPolicy policy02 = new
BeSelectionPolicy.Builder().addTags(Sets.newHashSet(taga))
-
.setStorageMedium(TStorageMedium.HDD).preferComputeNode().build();
+
.setStorageMedium(TStorageMedium.HDD).preferComputeNode(true).build();
Assert.assertEquals(1, infoService.selectBackendIdsByPolicy(policy02,
1).size());
BeSelectionPolicy policy03 = new
BeSelectionPolicy.Builder().addTags(Sets.newHashSet(taga))
-
.setStorageMedium(TStorageMedium.HDD).preferComputeNode().assignCandidateNum(0).build();
- Assert.assertEquals(0, infoService.selectBackendIdsByPolicy(policy03,
1).size());
+
.setStorageMedium(TStorageMedium.HDD).preferComputeNode(true).assignExpectBeNum(0).build();
+ Assert.assertEquals(1, infoService.selectBackendIdsByPolicy(policy03,
1).size());
BeSelectionPolicy policy04 = new
BeSelectionPolicy.Builder().addTags(Sets.newHashSet(taga))
-
.setStorageMedium(TStorageMedium.HDD).preferComputeNode().assignCandidateNum(1).build();
+
.setStorageMedium(TStorageMedium.HDD).preferComputeNode(true).assignExpectBeNum(1).build();
Assert.assertEquals(1, infoService.selectBackendIdsByPolicy(policy04,
1).size());
// one compute node and two mix node
@@ -264,11 +264,11 @@ public class SystemInfoServiceTest {
Assert.assertEquals(0, infoService.selectBackendIdsByPolicy(policy05,
3).size());
BeSelectionPolicy policy06 = new
BeSelectionPolicy.Builder().addTags(Sets.newHashSet(taga))
-
.setStorageMedium(TStorageMedium.HDD).preferComputeNode().assignCandidateNum(2).build();
+
.setStorageMedium(TStorageMedium.HDD).preferComputeNode(true).assignExpectBeNum(2).build();
Assert.assertEquals(2, infoService.selectBackendIdsByPolicy(policy06,
2).size());
BeSelectionPolicy policy07 = new
BeSelectionPolicy.Builder().addTags(Sets.newHashSet(taga))
-
.setStorageMedium(TStorageMedium.HDD).preferComputeNode().assignCandidateNum(3).build();
+
.setStorageMedium(TStorageMedium.HDD).preferComputeNode(true).assignExpectBeNum(3).build();
Assert.assertEquals(3, infoService.selectBackendIdsByPolicy(policy07,
3).size());
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]