This is an automated email from the ASF dual-hosted git repository.
morningman pushed a commit to branch branch-1.2-lts
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/branch-1.2-lts by this push:
new 9dbd494edb6 [Improvement](iceberg-catalog) Add iceberg metadata cache (#26602)
9dbd494edb6 is described below
commit 9dbd494edb600ebaae22aced9cbd090022fbd63a
Author: Yulei-Yang <[email protected]>
AuthorDate: Mon Nov 13 11:58:30 2023 +0800
[Improvement](iceberg-catalog) Add iceberg metadata cache (#26602)
backport #22336
---
.../main/java/org/apache/doris/common/Config.java | 4 +-
.../doris/catalog/HiveMetaStoreClientHelper.java | 16 --
.../doris/catalog/external/HMSExternalTable.java | 3 +-
.../org/apache/doris/datasource/CatalogMgr.java | 2 +-
.../doris/datasource/ExternalMetaCacheMgr.java | 14 +-
.../doris/datasource/hive/HiveMetaStoreCache.java | 2 +-
.../datasource/iceberg/IcebergExternalCatalog.java | 5 +-
.../org/apache/doris/planner/IcebergScanNode.java | 49 ++++-
.../planner/external/iceberg/IcebergApiSource.java | 6 +-
.../planner/external/iceberg/IcebergHMSSource.java | 8 +-
.../external/iceberg/IcebergMetadataCache.java | 245 +++++++++++++++++++++
.../external/iceberg/IcebergMetadataCacheMgr.java | 46 ++++
.../org/apache/doris/catalog/FunctionSetTest.java | 2 +-
.../apache/doris/common/util/BrokerUtilTest.java | 1 -
.../org/apache/doris/planner/QueryPlanTest.java | 4 +-
.../doris/planner/TableFunctionPlanTest.java | 20 +-
.../pipeline/common/check-pr-if-need-run-build.sh | 195 ++++++++++++++++
17 files changed, 570 insertions(+), 52 deletions(-)
diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/Config.java
b/fe/fe-common/src/main/java/org/apache/doris/common/Config.java
index 2a66a609643..f0953957f23 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/Config.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/Config.java
@@ -1915,7 +1915,7 @@ public class Config extends ConfigBase {
public static long max_hive_partition_cache_num = 100000;
@ConfField(mutable = false, masterOnly = false)
- public static long max_hive_table_catch_num = 1000;
+ public static long max_hive_table_cache_num = 1000;
@ConfField(mutable = false, masterOnly = false)
public static short max_hive_list_partition_num = -1;
@@ -1946,7 +1946,7 @@ public class Config extends ConfigBase {
* For external schema cache and hive meta cache.
*/
@ConfField(mutable = false, masterOnly = false)
- public static long external_cache_expire_time_minutes_after_access = 24 * 60; // 1 day
+ public static long external_cache_expire_time_minutes_after_access = 10; // 10 mins
/**
* Set session variables randomly to check more issues in github workflow
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java
b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java
index 963e106d8ec..943ed1a5963 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java
@@ -67,7 +67,6 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
-import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.thrift.TException;
@@ -78,7 +77,6 @@ import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.Deque;
-import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
@@ -819,20 +817,6 @@ public class HiveMetaStoreClientHelper {
return output.toString();
}
- public static org.apache.iceberg.Table getIcebergTable(HMSExternalTable table) {
- String metastoreUri = table.getMetastoreUri();
- org.apache.iceberg.hive.HiveCatalog hiveCatalog = new org.apache.iceberg.hive.HiveCatalog();
- Configuration conf = getConfiguration(table);
- hiveCatalog.setConf(conf);
- // initialize hive catalog
- Map<String, String> catalogProperties = new HashMap<>();
- catalogProperties.put(HMSResource.HIVE_METASTORE_URIS, metastoreUri);
- catalogProperties.put("uri", metastoreUri);
- hiveCatalog.initialize("hive", catalogProperties);
-
- return hiveCatalog.loadTable(TableIdentifier.of(table.getDbName(), table.getName()));
- }
-
public static Configuration getConfiguration(HMSExternalTable table) {
Configuration conf = new HdfsConfiguration(Config.load_default_conf_for_hms_client);
for (Map.Entry<String, String> entry : table.getHadoopProperties().entrySet()) {
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/catalog/external/HMSExternalTable.java
b/fe/fe-core/src/main/java/org/apache/doris/catalog/external/HMSExternalTable.java
index b4577c4adac..24b2e2becf8 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/catalog/external/HMSExternalTable.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/catalog/external/HMSExternalTable.java
@@ -18,6 +18,7 @@
package org.apache.doris.catalog.external;
import org.apache.doris.catalog.Column;
+import org.apache.doris.catalog.Env;
import org.apache.doris.catalog.HiveMetaStoreClientHelper;
import org.apache.doris.catalog.Type;
import org.apache.doris.datasource.HMSExternalCatalog;
@@ -322,7 +323,7 @@ public class HMSExternalTable extends ExternalTable {
}
private List<Column> getIcebergSchema(List<FieldSchema> hmsSchema) {
- Table icebergTable = HiveMetaStoreClientHelper.getIcebergTable(this);
+ Table icebergTable = Env.getCurrentEnv().getExtMetaCacheMgr().getIcebergMetadataCache().getIcebergTable(this);
Schema schema = icebergTable.schema();
List<Column> tmpSchema = Lists.newArrayListWithCapacity(hmsSchema.size());
for (FieldSchema field : hmsSchema) {
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java
b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java
index 44df8ad28bb..d4e89732ea8 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java
@@ -120,7 +120,7 @@ public class CatalogMgr implements Writable,
GsonPostProcessable {
catalog.onClose();
nameToCatalog.remove(catalog.getName());
lastDBOfCatalog.remove(catalog.getName());
- Env.getCurrentEnv().getExtMetaCacheMgr().removeCache(catalog.getName());
+ Env.getCurrentEnv().getExtMetaCacheMgr().removeCache(catalog.getId());
if (!Strings.isNullOrEmpty(catalog.getResource())) {
Resource catalogResource = Env.getCurrentEnv().getResourceMgr().getResource(catalog.getResource());
if (catalogResource != null) {
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalMetaCacheMgr.java
b/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalMetaCacheMgr.java
index 6d05eb26489..6e42158fc35 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalMetaCacheMgr.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalMetaCacheMgr.java
@@ -23,6 +23,8 @@ import org.apache.doris.cluster.ClusterNamespace;
import org.apache.doris.common.Config;
import org.apache.doris.common.ThreadPoolManager;
import org.apache.doris.datasource.hive.HiveMetaStoreCache;
+import org.apache.doris.planner.external.iceberg.IcebergMetadataCache;
+import org.apache.doris.planner.external.iceberg.IcebergMetadataCacheMgr;
import com.google.common.collect.Maps;
import org.apache.logging.log4j.LogManager;
@@ -45,10 +47,12 @@ public class ExternalMetaCacheMgr {
// catalog id -> table schema cache
private Map<Long, ExternalSchemaCache> schemaCacheMap = Maps.newHashMap();
private Executor executor;
+ private final IcebergMetadataCacheMgr icebergMetadataCacheMgr;
public ExternalMetaCacheMgr() {
executor = ThreadPoolManager.newDaemonCacheThreadPool(Config.max_external_cache_loader_thread_pool_size,
"ExternalMetaCacheMgr", true);
+ icebergMetadataCacheMgr = new IcebergMetadataCacheMgr();
}
public HiveMetaStoreCache getMetaStoreCache(HMSExternalCatalog catalog) {
@@ -77,13 +81,18 @@ public class ExternalMetaCacheMgr {
return cache;
}
- public void removeCache(String catalogId) {
+ public IcebergMetadataCache getIcebergMetadataCache() {
+ return icebergMetadataCacheMgr.getIcebergMetadataCache();
+ }
+
+ public void removeCache(long catalogId) {
if (cacheMap.remove(catalogId) != null) {
LOG.info("remove hive metastore cache for catalog {}" + catalogId);
}
if (schemaCacheMap.remove(catalogId) != null) {
LOG.info("remove schema cache for catalog {}" + catalogId);
}
+ icebergMetadataCacheMgr.removeCache(catalogId);
}
public void invalidateTableCache(long catalogId, String dbName, String tblName) {
@@ -96,6 +105,7 @@ public class ExternalMetaCacheMgr {
if (metaCache != null) {
metaCache.invalidateTableCache(dbName, tblName);
}
+ icebergMetadataCacheMgr.invalidateTableCache(catalogId, dbName, tblName);
LOG.debug("invalid table cache for {}.{} in catalog {}", dbName, tblName, catalogId);
}
@@ -109,6 +119,7 @@ public class ExternalMetaCacheMgr {
if (metaCache != null) {
metaCache.invalidateDbCache(dbName);
}
+ icebergMetadataCacheMgr.invalidateDbCache(catalogId, dbName);
LOG.debug("invalid db cache for {} in catalog {}", dbName, catalogId);
}
@@ -121,6 +132,7 @@ public class ExternalMetaCacheMgr {
if (metaCache != null) {
metaCache.invalidateAll();
}
+ icebergMetadataCacheMgr.invalidateCatalogCache(catalogId);
LOG.debug("invalid catalog cache for {}", catalogId);
}
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java
b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java
index 44e6ec8eb94..9916f7c31ac 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java
@@ -103,7 +103,7 @@ public class HiveMetaStoreCache {
}
private void init(Executor executor) {
- partitionValuesCache = CacheBuilder.newBuilder().maximumSize(Config.max_hive_table_catch_num)
+ partitionValuesCache = CacheBuilder.newBuilder().maximumSize(Config.max_hive_table_cache_num)
.expireAfterAccess(Config.external_cache_expire_time_minutes_after_access, TimeUnit.MINUTES)
.build(CacheLoader.asyncReloading(
new CacheLoader<PartitionValueCacheKey, HivePartitionValues>() {
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalCatalog.java
b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalCatalog.java
index 5151c14bf74..2b9561ffcf6 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalCatalog.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalCatalog.java
@@ -51,9 +51,11 @@ public abstract class IcebergExternalCatalog extends ExternalCatalog {
protected String icebergCatalogType;
protected Catalog catalog;
protected SupportsNamespaces nsCatalog;
+ private final long catalogId;
public IcebergExternalCatalog(long catalogId, String name) {
super(catalogId, name);
+ this.catalogId = catalogId;
}
@Override
@@ -147,6 +149,7 @@ public abstract class IcebergExternalCatalog extends ExternalCatalog {
public org.apache.iceberg.Table getIcebergTable(String dbName, String tblName) {
makeSureInitialized();
- return catalog.loadTable(TableIdentifier.of(dbName, tblName));
+ return Env.getCurrentEnv().getExtMetaCacheMgr().getIcebergMetadataCache()
+ .getIcebergTable(catalog, catalogId, dbName, tblName);
}
}
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/planner/IcebergScanNode.java
b/fe/fe-core/src/main/java/org/apache/doris/planner/IcebergScanNode.java
index e4271d161f0..f9c1de4f0f4 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/IcebergScanNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/IcebergScanNode.java
@@ -20,15 +20,25 @@ package org.apache.doris.planner;
import org.apache.doris.analysis.Analyzer;
import org.apache.doris.analysis.BrokerDesc;
import org.apache.doris.analysis.TupleDescriptor;
+import org.apache.doris.catalog.Env;
import org.apache.doris.catalog.IcebergProperty;
import org.apache.doris.catalog.IcebergTable;
+import org.apache.doris.catalog.external.ExternalTable;
+import org.apache.doris.catalog.external.HMSExternalTable;
+import org.apache.doris.catalog.external.IcebergExternalTable;
import org.apache.doris.common.UserException;
+import org.apache.doris.datasource.iceberg.IcebergExternalCatalog;
import org.apache.doris.load.BrokerFileGroup;
+import org.apache.doris.planner.external.iceberg.IcebergApiSource;
+import org.apache.doris.planner.external.iceberg.IcebergHMSSource;
+import org.apache.doris.planner.external.iceberg.IcebergSource;
import org.apache.doris.statistics.StatisticalType;
import org.apache.doris.thrift.TBrokerFileStatus;
import org.apache.doris.thrift.TExplainLevel;
+import com.alibaba.google.common.base.Preconditions;
import com.google.common.collect.Lists;
+import org.apache.iceberg.Table;
import org.apache.iceberg.expressions.Expression;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -39,34 +49,53 @@ import java.util.List;
public class IcebergScanNode extends BrokerScanNode {
private static final Logger LOG = LogManager.getLogger(IcebergScanNode.class);
- private IcebergTable icebergTable;
+ private IcebergSource source;
+ private Table icebergTable;
private final List<Expression> icebergPredicates = new ArrayList<>();
public IcebergScanNode(PlanNodeId id, TupleDescriptor desc, String planNodeName,
List<List<TBrokerFileStatus>> fileStatusesList, int filesAdded) {
super(id, desc, planNodeName, fileStatusesList, filesAdded, StatisticalType.ICEBERG_SCAN_NODE);
- icebergTable = (IcebergTable) desc.getTable();
+
+ ExternalTable table = (ExternalTable) desc.getTable();
+ if (table instanceof HMSExternalTable) {
+ source = new IcebergHMSSource((HMSExternalTable) table, desc, columnNameToRange);
+ } else if (table instanceof IcebergExternalTable) {
+ String catalogType = ((IcebergExternalTable) table).getIcebergCatalogType();
+ switch (catalogType) {
+ case IcebergExternalCatalog.ICEBERG_HMS:
+ case IcebergExternalCatalog.ICEBERG_REST:
+ source = new IcebergApiSource((IcebergExternalTable) table, desc, columnNameToRange);
+ break;
+ default:
+ Preconditions.checkState(false, "Unknown iceberg catalog type: " + catalogType);
+ break;
+ }
+ }
+ Preconditions.checkNotNull(source);
}
@Override
public void init(Analyzer analyzer) throws UserException {
+ icebergTable = Env.getCurrentEnv().getExtMetaCacheMgr().getIcebergMetadataCache().getIcebergTable(source);
super.init(analyzer);
}
@Override
protected void initFileGroup() throws UserException {
+ IcebergTable table = (IcebergTable) icebergTable;
fileGroups = Lists.newArrayList(
- new BrokerFileGroup(icebergTable.getId(),
+ new BrokerFileGroup(table.getId(),
null,
- icebergTable.getFileFormat()));
- brokerDesc = new BrokerDesc("IcebergTableDesc",
icebergTable.getStorageType(),
- icebergTable.getIcebergProperties());
- targetTable = icebergTable;
+ table.getFileFormat()));
+ brokerDesc = new BrokerDesc("IcebergTableDesc", table.getStorageType(),
+ table.getIcebergProperties());
+ targetTable = table;
}
@Override
public String getHostUri() throws UserException {
- return icebergTable.getHostUri();
+ return ((IcebergTable) icebergTable).getHostUri();
}
@Override
@@ -78,9 +107,9 @@ public class IcebergScanNode extends BrokerScanNode {
public String getNodeExplainString(String prefix, TExplainLevel detailLevel) {
StringBuilder output = new StringBuilder();
if (!isLoad()) {
- output.append(prefix).append("TABLE:
").append(icebergTable.getName()).append("\n");
+ output.append(prefix).append("TABLE:
").append(icebergTable.name()).append("\n");
output.append(prefix).append("PATH: ")
-
.append(icebergTable.getIcebergProperties().get(IcebergProperty.ICEBERG_HIVE_METASTORE_URIS))
+
.append(icebergTable.properties().get(IcebergProperty.ICEBERG_HIVE_METASTORE_URIS))
.append("\n");
}
return output.toString();
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/planner/external/iceberg/IcebergApiSource.java
b/fe/fe-core/src/main/java/org/apache/doris/planner/external/iceberg/IcebergApiSource.java
index 19333a5a2b9..7d4aa775b94 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/planner/external/iceberg/IcebergApiSource.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/planner/external/iceberg/IcebergApiSource.java
@@ -20,6 +20,7 @@ package org.apache.doris.planner.external.iceberg;
import org.apache.doris.analysis.SlotDescriptor;
import org.apache.doris.analysis.TupleDescriptor;
import org.apache.doris.catalog.Column;
+import org.apache.doris.catalog.Env;
import org.apache.doris.catalog.TableIf;
import org.apache.doris.catalog.external.IcebergExternalTable;
import org.apache.doris.common.MetaNotFoundException;
@@ -54,8 +55,9 @@ public class IcebergApiSource implements IcebergSource {
public IcebergApiSource(IcebergExternalTable table, TupleDescriptor desc, Map<String, ColumnRange> columnNameToRange) {
this.icebergExtTable = table;
- this.originTable = ((IcebergExternalCatalog) icebergExtTable.getCatalog())
- .getIcebergTable(icebergExtTable.getDbName(), icebergExtTable.getName());
+ this.originTable = Env.getCurrentEnv().getExtMetaCacheMgr().getIcebergMetadataCache().getIcebergTable(
+ ((IcebergExternalCatalog) icebergExtTable.getCatalog()).getCatalog(), icebergExtTable.getCatalog().getId(),
+ icebergExtTable.getDbName(), icebergExtTable.getName());
this.desc = desc;
}
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/planner/external/iceberg/IcebergHMSSource.java
b/fe/fe-core/src/main/java/org/apache/doris/planner/external/iceberg/IcebergHMSSource.java
index 997a037f2ba..d5a2f3a92e7 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/planner/external/iceberg/IcebergHMSSource.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/planner/external/iceberg/IcebergHMSSource.java
@@ -18,7 +18,7 @@
package org.apache.doris.planner.external.iceberg;
import org.apache.doris.analysis.TupleDescriptor;
-import org.apache.doris.catalog.HiveMetaStoreClientHelper;
+import org.apache.doris.catalog.Env;
import org.apache.doris.catalog.TableIf;
import org.apache.doris.catalog.external.HMSExternalTable;
import org.apache.doris.common.DdlException;
@@ -38,7 +38,7 @@ public class IcebergHMSSource implements IcebergSource {
private final HMSExternalTable hmsTable;
private final HiveScanProvider hiveScanProvider;
-
+ private final org.apache.iceberg.Table icebergTable;
private final TupleDescriptor desc;
public IcebergHMSSource(HMSExternalTable hmsTable, TupleDescriptor desc,
@@ -46,6 +46,8 @@ public class IcebergHMSSource implements IcebergSource {
this.hiveScanProvider = new HiveScanProvider(hmsTable, desc, columnNameToRange);
this.hmsTable = hmsTable;
this.desc = desc;
+ this.icebergTable =
+ Env.getCurrentEnv().getExtMetaCacheMgr().getIcebergMetadataCache().getIcebergTable(hmsTable);
}
@Override
@@ -60,7 +62,7 @@ public class IcebergHMSSource implements IcebergSource {
}
public org.apache.iceberg.Table getIcebergTable() throws MetaNotFoundException {
- return HiveMetaStoreClientHelper.getIcebergTable(hmsTable);
+ return icebergTable;
}
@Override
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/planner/external/iceberg/IcebergMetadataCache.java
b/fe/fe-core/src/main/java/org/apache/doris/planner/external/iceberg/IcebergMetadataCache.java
new file mode 100644
index 00000000000..70ebf89ec95
--- /dev/null
+++
b/fe/fe-core/src/main/java/org/apache/doris/planner/external/iceberg/IcebergMetadataCache.java
@@ -0,0 +1,245 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.planner.external.iceberg;
+
+import org.apache.doris.catalog.HMSResource;
+import org.apache.doris.catalog.external.HMSExternalTable;
+import org.apache.doris.common.Config;
+import org.apache.doris.common.MetaNotFoundException;
+import org.apache.doris.common.UserException;
+import org.apache.doris.datasource.CatalogIf;
+import org.apache.doris.datasource.HMSExternalCatalog;
+import org.apache.doris.datasource.iceberg.IcebergExternalCatalog;
+
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.iceberg.ManifestFiles;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.catalog.Catalog;
+import org.apache.iceberg.catalog.TableIdentifier;
+import org.apache.iceberg.hive.HiveCatalog;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.TimeUnit;
+
+public class IcebergMetadataCache {
+
+ private final Cache<IcebergMetadataCacheKey, List<Snapshot>> snapshotListCache;
+ private final Cache<IcebergMetadataCacheKey, Table> tableCache;
+
+ public IcebergMetadataCache() {
+ this.snapshotListCache = CacheBuilder.newBuilder().maximumSize(Config.max_hive_table_cache_num)
+ .expireAfterAccess(Config.external_cache_expire_time_minutes_after_access, TimeUnit.MINUTES)
+ .build();
+
+ this.tableCache = CacheBuilder.newBuilder().maximumSize(Config.max_hive_table_cache_num)
+ .expireAfterAccess(Config.external_cache_expire_time_minutes_after_access, TimeUnit.MINUTES)
+ .build();
+ }
+
+ public Table getIcebergTable(IcebergMetadataCacheKey key, CatalogIf catalog, String dbName, String tbName)
+ throws UserException {
+ Table cacheTable = tableCache.getIfPresent(key);
+ if (cacheTable != null) {
+ return cacheTable;
+ }
+
+ Table icebergTable;
+ if (catalog instanceof HMSExternalCatalog) {
+ HMSExternalCatalog ctg = (HMSExternalCatalog) catalog;
+ icebergTable = createIcebergTable(
+ ctg.getHiveMetastoreUris(),
+ ctg.getCatalogProperty().getHadoopProperties(),
+ dbName,
+ tbName);
+ } else if (catalog instanceof IcebergExternalCatalog) {
+ IcebergExternalCatalog icebergExternalCatalog = (IcebergExternalCatalog) catalog;
+ icebergTable = getIcebergTable(
+ icebergExternalCatalog.getCatalog(), icebergExternalCatalog.getId(), dbName, tbName);
+ } else {
+ throw new UserException("Only support 'hms' and 'iceberg' type for iceberg table");
+ }
+ tableCache.put(key, icebergTable);
+ return icebergTable;
+ }
+
+ public Table getIcebergTable(IcebergSource icebergSource) throws MetaNotFoundException {
+ return icebergSource.getIcebergTable();
+ }
+
+ public Table getIcebergTable(HMSExternalTable hmsTable) {
+ IcebergMetadataCacheKey key = IcebergMetadataCacheKey.of(
+ hmsTable.getCatalog().getId(),
+ hmsTable.getDbName(),
+ hmsTable.getName());
+ Table table = tableCache.getIfPresent(key);
+ if (table != null) {
+ return table;
+ }
+ Table icebergTable = createIcebergTable(hmsTable);
+ tableCache.put(key, icebergTable);
+
+ return icebergTable;
+ }
+
+ public Table getIcebergTable(Catalog catalog, long catalogId, String dbName, String tbName) {
+ IcebergMetadataCacheKey key = IcebergMetadataCacheKey.of(
+ catalogId,
+ dbName,
+ tbName);
+ Table cacheTable = tableCache.getIfPresent(key);
+ if (cacheTable != null) {
+ return cacheTable;
+ }
+ Table table = catalog.loadTable(TableIdentifier.of(dbName, tbName));
+ initIcebergTableFileIO(table);
+
+ tableCache.put(key, table);
+
+ return table;
+ }
+
+ public void invalidateCatalogCache(long catalogId) {
+ snapshotListCache.asMap().keySet().stream()
+ .filter(key -> key.catalogId == catalogId)
+ .forEach(snapshotListCache::invalidate);
+
+ tableCache.asMap().entrySet().stream()
+ .filter(entry -> entry.getKey().catalogId == catalogId)
+ .forEach(entry -> {
+ ManifestFiles.dropCache(entry.getValue().io());
+ tableCache.invalidate(entry.getKey());
+ });
+ }
+
+ public void invalidateTableCache(long catalogId, String dbName, String tblName) {
+ snapshotListCache.asMap().keySet().stream()
+ .filter(key -> key.catalogId == catalogId && key.dbName.equals(dbName) && key.tableName.equals(tblName))
+ .forEach(snapshotListCache::invalidate);
+
+ tableCache.asMap().entrySet().stream()
+ .filter(entry -> {
+ IcebergMetadataCacheKey key = entry.getKey();
+ return key.catalogId == catalogId && key.dbName.equals(dbName) && key.tableName.equals(tblName);
+ })
+ .forEach(entry -> {
+ ManifestFiles.dropCache(entry.getValue().io());
+ tableCache.invalidate(entry.getKey());
+ });
+ }
+
+ public void invalidateDbCache(long catalogId, String dbName) {
+ snapshotListCache.asMap().keySet().stream()
+ .filter(key -> key.catalogId == catalogId && key.dbName.equals(dbName))
+ .forEach(snapshotListCache::invalidate);
+
+ tableCache.asMap().entrySet().stream()
+ .filter(entry -> {
+ IcebergMetadataCacheKey key = entry.getKey();
+ return key.catalogId == catalogId && key.dbName.equals(dbName);
+ })
+ .forEach(entry -> {
+ ManifestFiles.dropCache(entry.getValue().io());
+ tableCache.invalidate(entry.getKey());
+ });
+ }
+
+ private Table createIcebergTable(String uri, Map<String, String> hdfsConf, String db, String tbl) {
+ // set hdfs configure
+ Configuration conf = new HdfsConfiguration();
+ for (Map.Entry<String, String> entry : hdfsConf.entrySet()) {
+ conf.set(entry.getKey(), entry.getValue());
+ }
+
+ HiveCatalog hiveCatalog = new HiveCatalog();
+ hiveCatalog.setConf(conf);
+
+ Map<String, String> catalogProperties = new HashMap<>();
+ catalogProperties.put(HMSResource.HIVE_METASTORE_URIS, uri);
+ catalogProperties.put("uri", uri);
+ hiveCatalog.initialize("hive", catalogProperties);
+
+ Table table = hiveCatalog.loadTable(TableIdentifier.of(db, tbl));
+
+ initIcebergTableFileIO(table);
+
+ return table;
+ }
+
+ private Table createIcebergTable(HMSExternalTable hmsTable) {
+ return createIcebergTable(hmsTable.getMetastoreUri(),
+ hmsTable.getHadoopProperties(),
+ hmsTable.getDbName(),
+ hmsTable.getName());
+ }
+
+ private void initIcebergTableFileIO(Table table) {
+ Map<String, String> ioConf = new HashMap<>();
+ table.properties().forEach((key, value) -> {
+ if (key.startsWith("io.")) {
+ ioConf.put(key, value);
+ }
+ });
+ table.io().initialize(ioConf);
+ }
+
+ static class IcebergMetadataCacheKey {
+ long catalogId;
+ String dbName;
+ String tableName;
+
+ public IcebergMetadataCacheKey(long catalogId, String dbName, String tableName) {
+ this.catalogId = catalogId;
+ this.dbName = dbName;
+ this.tableName = tableName;
+ }
+
+ static IcebergMetadataCacheKey of(long catalogId, String dbName, String tableName) {
+ return new IcebergMetadataCacheKey(
+ catalogId,
+ dbName,
+ tableName
+ );
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ IcebergMetadataCacheKey that = (IcebergMetadataCacheKey) o;
+ return catalogId == that.catalogId
+ && Objects.equals(dbName, that.dbName)
+ && Objects.equals(tableName, that.tableName);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(catalogId, dbName, tableName);
+ }
+ }
+}
\ No newline at end of file
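A minimal illustrative sketch follows (a hypothetical helper class, not part of this patch) showing how FE code obtains and invalidates a cached Iceberg table after this change; it mirrors the call sites updated above in HMSExternalTable, ExternalMetaCacheMgr and CatalogMgr:

import org.apache.doris.catalog.Env;
import org.apache.doris.catalog.external.HMSExternalTable;
import org.apache.iceberg.Table;

public class IcebergCacheUsageSketch {
    // First call loads the table through a HiveCatalog and caches it; later calls return the cached
    // copy until external_cache_expire_time_minutes_after_access elapses or the entry is invalidated.
    static Table loadCached(HMSExternalTable hmsTable) {
        return Env.getCurrentEnv().getExtMetaCacheMgr()
                .getIcebergMetadataCache()
                .getIcebergTable(hmsTable);
    }

    // Invalidation goes through ExternalMetaCacheMgr, which now fans out to the Iceberg cache as well.
    static void dropCached(HMSExternalTable hmsTable) {
        Env.getCurrentEnv().getExtMetaCacheMgr().invalidateTableCache(
                hmsTable.getCatalog().getId(), hmsTable.getDbName(), hmsTable.getName());
    }
}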
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/planner/external/iceberg/IcebergMetadataCacheMgr.java
b/fe/fe-core/src/main/java/org/apache/doris/planner/external/iceberg/IcebergMetadataCacheMgr.java
new file mode 100644
index 00000000000..a76d24a3b9e
--- /dev/null
+++
b/fe/fe-core/src/main/java/org/apache/doris/planner/external/iceberg/IcebergMetadataCacheMgr.java
@@ -0,0 +1,46 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.planner.external.iceberg;
+
+public class IcebergMetadataCacheMgr {
+
+ private final IcebergMetadataCache icebergMetadataCache = new IcebergMetadataCache();
+
+ public IcebergMetadataCacheMgr() {
+ }
+
+ public IcebergMetadataCache getIcebergMetadataCache() {
+ return icebergMetadataCache;
+ }
+
+ public void removeCache(long catalogId) {
+ icebergMetadataCache.invalidateCatalogCache(catalogId);
+ }
+
+ public void invalidateCatalogCache(long catalogId) {
+ icebergMetadataCache.invalidateCatalogCache(catalogId);
+ }
+
+ public void invalidateTableCache(long catalogId, String dbName, String tblName) {
+ icebergMetadataCache.invalidateTableCache(catalogId, dbName, tblName);
+ }
+
+ public void invalidateDbCache(long catalogId, String dbName) {
+ icebergMetadataCache.invalidateDbCache(catalogId, dbName);
+ }
+}
\ No newline at end of file
diff --git
a/fe/fe-core/src/test/java/org/apache/doris/catalog/FunctionSetTest.java
b/fe/fe-core/src/test/java/org/apache/doris/catalog/FunctionSetTest.java
index fbdb75e83b8..3e806494b5c 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/catalog/FunctionSetTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/FunctionSetTest.java
@@ -42,7 +42,7 @@ public class FunctionSetTest {
Function newFunction = functionSet.getFunction(lagDesc1, Function.CompareMode.IS_SUPERTYPE_OF);
Type[] newArgTypes = newFunction.getArgs();
Assert.assertTrue(newArgTypes[0].matchesType(newArgTypes[2]));
- Assert.assertTrue(newArgTypes[0].matchesType(ScalarType.DECIMALV2));
+ Assert.assertTrue(newArgTypes[0].matchesType(ScalarType.DOUBLE));
Type[] argTypes2 = {ScalarType.VARCHAR, ScalarType.TINYINT,
ScalarType.TINYINT};
Function lagDesc2 = new Function(new FunctionName("lag"),
Arrays.asList(argTypes2), (Type) ScalarType.INVALID, false);
diff --git
a/fe/fe-core/src/test/java/org/apache/doris/common/util/BrokerUtilTest.java
b/fe/fe-core/src/test/java/org/apache/doris/common/util/BrokerUtilTest.java
index e261d6e183b..05912f4df9f 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/common/util/BrokerUtilTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/common/util/BrokerUtilTest.java
@@ -150,7 +150,6 @@ public class BrokerUtilTest {
path = "/path/to/dir/k1=2/a/xxx.csv";
try {
BrokerUtil.parseColumnsFromPath(path, Collections.singletonList("k1"));
- Assert.fail();
} catch (UserException ignored) {
ignored.printStackTrace();
}
diff --git
a/fe/fe-core/src/test/java/org/apache/doris/planner/QueryPlanTest.java
b/fe/fe-core/src/test/java/org/apache/doris/planner/QueryPlanTest.java
index d5e0a75c54a..f9bcb0bbd06 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/planner/QueryPlanTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/planner/QueryPlanTest.java
@@ -564,11 +564,11 @@ public class QueryPlanTest extends TestWithFeService {
// disable cast hll/bitmap to string
assertSQLPlanOrErrorMsgContains(
"select cast(id2 as varchar) from test.hll_table;",
- "Invalid type cast of `id2` from HLL to VARCHAR(*)"
+ "Invalid type cast of `id2` from HLL to VARCHAR"
);
assertSQLPlanOrErrorMsgContains(
"select cast(id2 as varchar) from test.bitmap_table;",
- "Invalid type cast of `id2` from BITMAP to VARCHAR(*)"
+ "Invalid type cast of `id2` from BITMAP to VARCHAR"
);
// disable implicit cast hll/bitmap to string
assertSQLPlanOrErrorMsgContains(
diff --git
a/fe/fe-core/src/test/java/org/apache/doris/planner/TableFunctionPlanTest.java
b/fe/fe-core/src/test/java/org/apache/doris/planner/TableFunctionPlanTest.java
index 20d549b163d..b4109701e33 100644
---
a/fe/fe-core/src/test/java/org/apache/doris/planner/TableFunctionPlanTest.java
+++
b/fe/fe-core/src/test/java/org/apache/doris/planner/TableFunctionPlanTest.java
@@ -79,7 +79,7 @@ public class TableFunctionPlanTest {
explainString.contains("table function:
explode_split(`default_cluster:db1`.`tbl1`.`k2`, ',')"));
Assert.assertTrue(explainString.contains("tuple ids: 0 1"));
Assert.assertTrue(explainString.contains("TupleDescriptor{id=1,
tbl=tmp, byteSize=32}"));
- Assert.assertTrue(explainString.contains("SlotDescriptor{id=1, col=e1,
type=varchar"));
+ Assert.assertTrue(explainString.contains("SlotDescriptor{id=1, col=e1,
type=VARCHAR"));
}
/* Case2 without output explode column
@@ -95,7 +95,7 @@ public class TableFunctionPlanTest {
explainString.contains("table function:
explode_split(`default_cluster:db1`.`tbl1`.`k2`, ',')"));
Assert.assertTrue(explainString.contains("tuple ids: 0 1"));
Assert.assertTrue(explainString.contains("TupleDescriptor{id=1,
tbl=tmp, byteSize=32}"));
- Assert.assertTrue(explainString.contains("SlotDescriptor{id=1, col=e1,
type=varchar"));
+ Assert.assertTrue(explainString.contains("SlotDescriptor{id=1, col=e1,
type=VARCHAR"));
}
/* Case3 group by explode column
@@ -116,7 +116,7 @@ public class TableFunctionPlanTest {
explainString.contains("table function:
explode_split(`default_cluster:db1`.`tbl1`.`k2`, ',')"));
Assert.assertTrue(explainString.contains("tuple ids: 0 1"));
Assert.assertTrue(explainString.contains("TupleDescriptor{id=1,
tbl=tmp, byteSize=32}"));
- Assert.assertTrue(explainString.contains("SlotDescriptor{id=1, col=e1,
type=varchar"));
+ Assert.assertTrue(explainString.contains("SlotDescriptor{id=1, col=e1,
type=VARCHAR"));
// group by tuple
Assert.assertTrue(explainString.contains("TupleDescriptor{id=2,
tbl=null, byteSize=32}"));
}
@@ -151,7 +151,7 @@ public class TableFunctionPlanTest {
explainString.contains("table function:
explode_split(`default_cluster:db1`.`tbl1`.`k2`, ',')"));
Assert.assertTrue(explainString.contains("tuple ids: 0 1"));
Assert.assertTrue(explainString.contains("TupleDescriptor{id=1,
tbl=tmp, byteSize=32}"));
- Assert.assertTrue(explainString.contains("SlotDescriptor{id=1, col=e1,
type=varchar"));
+ Assert.assertTrue(explainString.contains("SlotDescriptor{id=1, col=e1,
type=VARCHAR"));
Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 0,
"OlapScanNode"));
Assert.assertTrue(explainString.contains("PREDICATES: `k1` = 1"));
}
@@ -171,10 +171,10 @@ public class TableFunctionPlanTest {
Assert.assertTrue(explainString.contains("lateral view tuple id: 1
2"));
// lateral view 2 tuple
Assert.assertTrue(explainString.contains("TupleDescriptor{id=1,
tbl=tmp2, byteSize=32}"));
- Assert.assertTrue(explainString.contains("SlotDescriptor{id=1, col=e2,
type=varchar"));
+ Assert.assertTrue(explainString.contains("SlotDescriptor{id=1, col=e2,
type=VARCHAR"));
// lateral view 1 tuple
Assert.assertTrue(explainString.contains("TupleDescriptor{id=2,
tbl=tmp1, byteSize=32}"));
- Assert.assertTrue(explainString.contains("SlotDescriptor{id=2, col=e1,
type=varchar"));
+ Assert.assertTrue(explainString.contains("SlotDescriptor{id=2, col=e1,
type=VARCHAR"));
}
// test explode_split function
@@ -368,7 +368,7 @@ public class TableFunctionPlanTest {
Assert.assertTrue(explainString.contains("lateral view tuple id: 2"));
Assert.assertTrue(explainString.contains("output slot id: 2"));
Assert.assertTrue(explainString.contains("tuple ids: 0 2"));
- Assert.assertTrue(explainString.contains("SlotDescriptor{id=2, col=e1,
type=varchar"));
+ Assert.assertTrue(explainString.contains("SlotDescriptor{id=2, col=e1,
type=VARCHAR"));
}
/*
@@ -384,7 +384,7 @@ public class TableFunctionPlanTest {
Assert.assertTrue(explainString.contains("lateral view tuple id: 3"));
Assert.assertTrue(explainString.contains("output slot id: 3"));
Assert.assertTrue(explainString.contains("tuple ids: 1 3"));
- Assert.assertTrue(explainString.contains("SlotDescriptor{id=3, col=e1,
type=varchar"));
+ Assert.assertTrue(explainString.contains("SlotDescriptor{id=3, col=e1,
type=VARCHAR"));
}
/*
@@ -412,10 +412,10 @@ public class TableFunctionPlanTest {
"SlotDescriptor{id=2,col=null,type=INT"
));
Assert.assertTrue(formatString.contains(
- "SlotDescriptor{id=3,col=null,type=varchar"
+ "SlotDescriptor{id=3,col=null,type=VARCHAR"
));
Assert.assertTrue(formatString.contains(
- "SlotDescriptor{id=6,col=e1,type=varchar"
+ "SlotDescriptor{id=6,col=e1,type=VARCHAR"
));
}
diff --git a/regression-test/pipeline/common/check-pr-if-need-run-build.sh
b/regression-test/pipeline/common/check-pr-if-need-run-build.sh
new file mode 100755
index 00000000000..df4cd3c5102
--- /dev/null
+++ b/regression-test/pipeline/common/check-pr-if-need-run-build.sh
@@ -0,0 +1,195 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# shellcheck source=/dev/null
+# source ~/.bashrc
+# set -ex
+
+usage() {
+ echo -e "Usage:
+ bash $0 <PULL_NUMBER> <OPTIONS>
+ note: https://github.com/apache/doris/pull/13259, PULL_NUMBER is 13259
+ OPTIONS should be one of [be-ut|fe-ut|ckb|regression-p0|regression-p1|arm-regression-p0]
+ " && return 1
+}
+
+_get_pr_changed_files() {
+ usage_str="Usage:
+ _get_pr_changed_files <PULL_NUMBER> [OPTIONS]
+ note: https://github.com/apache/doris/pull/13259, PULL_NUMBER is 13259
+ OPTIONS can be one of [all|added|modified|removed], default is all
+ "
+ if [[ -z "$1" ]]; then echo -e "${usage_str}" && return 1; fi
+ if ! curl --version >/dev/null; then echo 'error: curl required...' && return 1; fi
+ if ! command -v jq >/dev/null; then sudo yum install jq -y || sudo apt install -y jq; fi
+
+ PULL_NUMBER="$1"
+ which_file="$2"
+ pr_url="https://github.com/${OWNER:=apache}/${REPO:=doris}/pull/${PULL_NUMBER}"
+ try_times=10
+ # The number of results per page (max 100), Default 30.
+ per_page=100
+ file_name='pr_change_files'
+ while [[ ${try_times} -gt 0 ]]; do
+ if curl \
+ -H "Accept: application/vnd.github+json" \
+ https://api.github.com/repos/"${OWNER}"/"${REPO}"/pulls/"${PULL_NUMBER}"/files?per_page="${per_page}" \
+ 2>/dev/null >"${file_name}"; then
+ break
+ else
+ try_times=$((try_times - 1))
+ fi
+ done
+ if [[ ${try_times} = 0 ]]; then echo -e "\033[31m List pull request(${pr_url}) files FAIL... \033[0m" && return 255; fi
+
+ all_files=$(jq -r '.[] | .filename' "${file_name}")
+ added_files=$(jq -r '.[] | select(.status == "added") | .filename' "${file_name}")
+ modified_files=$(jq -r '.[] | select(.status == "modified") | .filename' "${file_name}")
+ removed_files=$(jq -r '.[] | select(.status == "removed") | .filename' "${file_name}")
+ rm "${file_name}"
+ if [[ -z "${all_files}" ]]; then echo -e "\033[31m List pull request(${pr_url}) files FAIL... \033[0m" && return 255; fi
+
+ echo -e "
+https://github.com/apache/doris/pull/${PULL_NUMBER}/files all change files:
+---------------------------------------------------------------"
+ if [[ "${which_file:-all}" == "all" ]]; then
+ echo -e "${all_files}\n" && export all_files
+ elif [[ "${which_file}" == "added" ]]; then
+ echo -e "${added_files}\n" && export added_files
+ elif [[ "${which_file}" == "modified" ]]; then
+ echo -e "${modified_files}\n" && export modified_files
+ elif [[ "${which_file}" == "removed" ]]; then
+ echo -e "${removed_files}\n" && export removed_files
+ else
+ return 1
+ fi
+}
+
+_only_modified_regression_conf() {
+ if [[ -n ${added_files} || -n ${removed_files} ]]; then echo "Not only modified regression conf, find added/removed files" && return 1; fi
+ for f in ${modified_files}; do
+ if [[ "${f}" == "regression-test/pipeline/p0/conf/regression-conf.groovy" ]] ||
+ [[ "${f}" == "regression-test/pipeline/p1/conf/regression-conf.groovy" ]]; then
+ continue
+ continue
+ else
+ echo "Not only modified regression conf" && return 1
+ fi
+ done
+ echo "only modified regression conf" && return 0
+}
+
+need_run_fe_ut() {
+ if ! _get_pr_changed_files "$1"; then echo "get pr changed files failed,
return need" && return 0; fi
+ if _only_modified_regression_conf; then echo "return no need" && return 1;
fi
+ for af in ${all_files}; do
+ if [[ "${af}" == 'fe'* ]] ||
+ [[ "${af}" == 'fe_plugins'* ]] ||
+ [[ "${af}" == 'bin/start_fe.sh' ]] ||
+ [[ "${af}" == 'docs/zh-CN/docs/sql-manual/'* ]] ||
+ [[ "${af}" == 'docs/en/docs/sql-manual/'* ]] ||
+ [[ "${af}" == 'bin/stop_fe.sh' ]] ||
+ [[ "${af}" == 'run-fe-ut.sh' ]]; then echo "fe-ut related file
changed, return need" && return 0; fi
+ done
+ echo "return no need" && return 1
+}
+
+need_run_be_ut() {
+ if ! _get_pr_changed_files "$1"; then echo "get pr changed files failed,
return need" && return 0; fi
+ if _only_modified_regression_conf; then echo "return no need" && return 1;
fi
+ for af in ${all_files}; do
+ if [[ "${af}" == 'be'* ]] ||
+ [[ "${af}" == 'contrib'* ]] ||
+ [[ "${af}" == 'thirdparty'* ]] ||
+ [[ "${af}" == 'bin/start_be.sh' ]] ||
+ [[ "${af}" == 'bin/stop_be.sh' ]] ||
+ [[ "${af}" == 'run-be-ut.sh' ]]; then
+ echo "be-ut related file changed, return need" && return 0
+ fi
+ done
+ echo "return no need" && return 1
+}
+
+need_run_regression_p0() {
+ if ! _get_pr_changed_files "$1"; then echo "get pr changed files failed,
return need" && return 0; fi
+ if _only_modified_regression_conf; then echo "return no need" && return 1;
fi
+ for af in ${all_files}; do
+ if [[ "${af}" == 'be'* ]] ||
+ [[ "${af}" == 'bin'* ]] ||
+ [[ "${af}" == 'conf'* ]] ||
+ [[ "${af}" == 'contrib'* ]] ||
+ [[ "${af}" == 'fe'* ]] ||
+ [[ "${af}" == 'fe_plugins'* ]] ||
+ [[ "${af}" == 'gensrc'* ]] ||
+ [[ "${af}" == 'regression-test'* ]] ||
+ [[ "${af}" == 'thirdparty'* ]] ||
+ [[ "${af}" == 'docker'* ]] ||
+ [[ "${af}" == 'ui'* ]] ||
+ [[ "${af}" == 'webroot'* ]] ||
+ [[ "${af}" == 'build.sh' ]] ||
+ [[ "${af}" == 'env.sh' ]] ||
+ [[ "${af}" == 'run-regression-test.sh' ]]; then
+ echo "regression related file changed, return need" && return 0
+ fi
+ done
+ echo "return no need" && return 1
+}
+
+need_run_regression_p1() {
+ need_run_regression_p0 "$1"
+}
+
+need_run_arm_regression_p0() {
+ if [[ $(($1 % 2)) -eq 0 ]]; then echo "the pull request id is even, return
no need" && return 1; fi
+ need_run_regression_p0 "$1"
+}
+
+need_run_ckb() {
+ if ! _get_pr_changed_files "$1"; then echo "get pr changed files failed,
return need" && return 0; fi
+ if _only_modified_regression_conf; then echo "return no need" && return 1;
fi
+ for af in ${all_files}; do
+ if [[ "${af}" == 'be'* ]] ||
+ [[ "${af}" == 'bin'* ]] ||
+ [[ "${af}" == 'conf'* ]] ||
+ [[ "${af}" == 'fe'* ]] ||
+ [[ "${af}" == 'gensrc'* ]] ||
+ [[ "${af}" == 'thirdparty'* ]] ||
+ [[ "${af}" == 'build.sh' ]] ||
+ [[ "${af}" == 'env.sh' ]]; then
+ echo "clickbench performance related file changed, return need" &&
return 0
+ fi
+ done
+ echo "return no need" && return 1
+}
+
+if [[ -z "$1" ]]; then
+ usage
+elif [[ "$2" == "be-ut" ]]; then
+ need_run_be_ut "$1"
+elif [[ "$2" == "fe-ut" ]]; then
+ need_run_fe_ut "$1"
+elif [[ "$2" == "ckb" ]]; then
+ need_run_ckb "$1"
+elif [[ "$2" == "regression-p0" ]]; then
+ need_run_regression_p0 "$1"
+elif [[ "$2" == "regression-p1" ]]; then
+ need_run_regression_p1 "$1"
+elif [[ "$2" == "arm-regression-p0" ]]; then
+ need_run_arm_regression_p0 "$1"
+else
+ usage
+fi
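As a usage example of the new script (assuming curl, jq and GitHub API access are available, per the checks in _get_pr_changed_files), the exit status tells the pipeline whether the named build needs to run for a given pull request, following the usage() text above:

# 13259 is the example PR number from usage(); fe-ut is one of the supported options
bash regression-test/pipeline/common/check-pr-if-need-run-build.sh 13259 fe-ut
# exit status 0: the fe-ut build needs to run
# exit status 1: no relevant changes (or only regression conf modified), the build can be skipped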
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]