morningman commented on code in PR #14470:
URL: https://github.com/apache/doris/pull/14470#discussion_r1034900880
##########
fe/fe-core/src/main/java/org/apache/doris/planner/external/IcebergScanProvider.java:
##########
@@ -143,9 +184,14 @@ public List<InputSplit> getSplits(List<Expr> exprs) throws
UserException {
spitTask.length(), new String[0]);
split.setFormatVersion(formatVersion);
if (formatVersion == 2) {
+ String dbName =
ClusterNamespace.getFullName(analyzer.getClusterName(), hmsTable.getDbName());
+ DeleteFileTempTable deleteFileTable = new
DeleteFileTempTable(hmsTable.getCatalog(),
Review Comment:
No need to create a new `DeleteFileTempTable` for every split.
Actually, there can be only one `DeleteFileTempTable` per Catalog.
And its name can be `iceberg_v2_delete_table`.
##########
gensrc/thrift/PlanNodes.thrift:
##########
@@ -268,7 +268,10 @@ struct TIcebergFileDesc {
1: optional i32 format_version;
// Iceberg file type, 0: data, 1: position delete, 2: equality delete.
2: optional i32 content;
- 3: optional list<TIcebergDeleteFileDesc> delete_files;
+ // When open a delete file, filter the data file path with the 'file_path'
property
+ 3: optional Exprs.TExpr file_select_conjunct;
+ 4: optional Types.TTupleId tuple_id;
Review Comment:
Better to use a concrete name, e.g. `delete_table_tuple_id`.
##########
fe/fe-core/src/main/java/org/apache/doris/planner/external/IcebergScanProvider.java:
##########
@@ -102,6 +124,25 @@ public static void
setIcebergParams(ExternalFileScanNode.ParamCreateContext cont
context.params.setTableFormatParams(tableFormatFileDesc);
}
+ private static void setPathSelectConjunct(TIcebergFileDesc fileDesc,
IcebergSplit icebergSplit)
+ throws UserException {
+ DeleteFileTempTable deleteFileTable =
icebergSplit.getDeleteFileTable();
+ DescriptorTable descTbl = icebergSplit.getAnalyzer().getDescTbl();
+ TableRef ref = new TableRef(deleteFileTable.getTableName(), null,
null);
+ BaseTableRef tableRef = new BaseTableRef(ref, deleteFileTable,
deleteFileTable.getTableName());
+ tableRef.analyze(icebergSplit.getAnalyzer());
+ fileDesc.setTupleId(tableRef.getDesc().getId().asInt());
+ descTbl.computeStatAndMemLayout();
+
+ SlotRef lhs = new SlotRef(deleteFileTable.getTableName(),
DeleteFileTempTable.DATA_FILE_PATH);
+ lhs.analyze(icebergSplit.getAnalyzer());
Review Comment:
There is no need to analyze `lhs` separately;
analyzing `pathSelectConjunct` alone is sufficient.
##########
fe/fe-core/src/main/java/org/apache/doris/planner/external/QueryScanProvider.java:
##########
@@ -92,6 +89,10 @@ public void createScanRangeLocations(ParamCreateContext
context, BackendPolicy b
THdfsParams tHdfsParams =
BrokerUtil.generateHdfsParam(locationProperties);
tHdfsParams.setFsName(fsName);
context.params.setHdfsParams(tHdfsParams);
+ // external data lake table
+ if (inputSplit instanceof IcebergSplit) {
Review Comment:
Why was this moved here?
What happens if the file is on S3?
##########
be/src/vec/exec/format/table/iceberg_reader.h:
##########
@@ -55,10 +62,11 @@ class IcebergTableReader : public TableFormatReader {
RuntimeProfile* _profile;
RuntimeState* _state;
const TFileScanRangeParams& _params;
- std::vector<const FieldSchema*> _column_schemas;
+ std::set<const FieldSchema*> _column_schemas;
Review Comment:
Why use a `set` here?
##########
be/src/vec/exec/format/table/iceberg_reader.cpp:
##########
@@ -139,12 +159,32 @@ Status IcebergTableReader::init_row_filters() {
auto& delete_file_schema = metadata->schema();
vector<std::string> names;
- for (auto i = 0; i < delete_file_schema.size(); ++i) {
+ int num_of_col = delete_file_schema.size();
+ for (auto i = 0; i < num_of_col; ++i) {
const FieldSchema* field =
delete_file_schema.get_column(i);
- _column_schemas.emplace_back(field);
+ if (_column_schemas.size() < num_of_col) {
+ _column_schemas.emplace(field);
+ }
names.emplace_back(field->name);
}
- Status d_st = delete_reader->init_reader(names, false);
+ auto row_desc = RowDescriptor(_state->desc_tbl(),
Review Comment:
line 170-179 should be done outside the `for (auto& delete_file : files)`
##########
be/src/vec/exec/format/table/iceberg_reader.cpp:
##########
@@ -139,12 +159,32 @@ Status IcebergTableReader::init_row_filters() {
auto& delete_file_schema = metadata->schema();
vector<std::string> names;
- for (auto i = 0; i < delete_file_schema.size(); ++i) {
+ int num_of_col = delete_file_schema.size();
Review Comment:
All delete files have the same schema. Maybe we can just read one of them to get
the file schema,
to avoid reading every file's metadata redundantly.
##########
be/src/vec/exec/format/table/iceberg_reader.cpp:
##########
@@ -110,6 +121,15 @@ void IcebergTableReader::filter_rows() {
}
}
}
+ if (!delete_row_ranges.empty()) {
Review Comment:
Use `VLOG(3)` level. And wrap the whole `if` block with `if (VLOG_IS_ON(3))`
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]