aokolnychyi commented on a change in pull request #4243:
URL: https://github.com/apache/iceberg/pull/4243#discussion_r819222170

##########
File path: core/src/main/java/org/apache/iceberg/BaseFilesTable.java
##########
@@ -0,0 +1,99 @@
+/**
+ * Base class logic for files metadata tables
+ */
+abstract class BaseFilesTable extends BaseMetadataTable {
+
+  BaseFilesTable(TableOperations ops, Table table, String name) {
+    super(ops, table, name);
+  }
+
+  @Override
+  public Schema schema() {
+    StructType partitionType = Partitioning.partitionType(table());
+    Schema schema = new Schema(DataFile.getType(partitionType).fields());
+    if (partitionType.fields().size() < 1) {
+      // avoid returning an empty struct, which is not always supported. instead, drop the partition field
+      return TypeUtil.selectNot(schema, Sets.newHashSet(DataFile.PARTITION_ID));
+    } else {
+      return schema;
+    }
+  }
+
+  abstract static class BaseFilesTableScan extends BaseMetadataTableScan {
+    private final Schema fileSchema;

Review comment:
   I see this field also defined in child classes. Instead of having multiple variables, what about exposing a protected accessor method `fileSchema()` and using it in children? That way, we will only have this variable defined in one place.
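
   For example, a minimal sketch of that shape (the accessor name and placement are only a suggestion, not the final API; the constructor is copied from this PR):

```
abstract static class BaseFilesTableScan extends BaseMetadataTableScan {
  private final Schema fileSchema;
  private final MetadataTableType type;

  protected BaseFilesTableScan(TableOperations ops, Table table, Schema fileSchema, MetadataTableType type) {
    super(ops, table, fileSchema);
    this.fileSchema = fileSchema;
    this.type = type;
  }

  // single owner of the file schema; children read it through this accessor instead of keeping their own copy
  protected Schema fileSchema() {
    return fileSchema;
  }
}

// a child scan can then drop its duplicated field, e.g. in DataFilesTableScan:
@Override
protected TableScan newRefinedScan(TableOperations ops, Table table, Schema schema, TableScanContext context) {
  return new DataFilesTableScan(ops, table, schema, fileSchema(), context);
}
```
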


##########
File path: core/src/main/java/org/apache/iceberg/DataFilesTable.java
##########
@@ -47,78 +42,44 @@
   @Override
   public TableScan newScan() {
-    return new FilesTableScan(operations(), table(), schema());
+    return new DataFilesTableScan(operations(), table(), schema());
   }

   @Override
   MetadataTableType metadataTableType() {
     return MetadataTableType.FILES;
   }

-  public static class FilesTableScan extends BaseMetadataTableScan {
+  public static class DataFilesTableScan extends BaseFilesTableScan {
     private final Schema fileSchema;

-    FilesTableScan(TableOperations ops, Table table, Schema fileSchema) {
-      super(ops, table, fileSchema);
+    DataFilesTableScan(TableOperations ops, Table table, Schema fileSchema) {
+      super(ops, table, fileSchema, MetadataTableType.FILES);
       this.fileSchema = fileSchema;
     }

-    private FilesTableScan(TableOperations ops, Table table, Schema schema, Schema fileSchema,
+    DataFilesTableScan(TableOperations ops, Table table, Schema schema, Schema fileSchema,

Review comment:
   nit: formatting


##########
File path: core/src/main/java/org/apache/iceberg/DeleteFilesTable.java
##########
@@ -0,0 +1,126 @@
+/**
+ * A {@link Table} implementation that exposes a table's delete files as rows.
+ */
+public class DeleteFilesTable extends BaseFilesTable {
+
+  DeleteFilesTable(TableOperations ops, Table table) {
+    this(ops, table, table.name() + ".delete_files");
+  }
+
+  DeleteFilesTable(TableOperations ops, Table table, String name) {
+    super(ops, table, name);
+  }
+
+  @Override
+  public TableScan newScan() {
+    return new DeleteFilesTableScan(operations(), table(), schema());
+  }
+
+  @Override
+  MetadataTableType metadataTableType() {
+    return MetadataTableType.DELETE_FILES;
+  }
+
+  public static class DeleteFilesTableScan extends BaseFilesTableScan {
+    private final Schema fileSchema;

Review comment:
   nit: same here, just access `fileSchema()` from the parent class



##########
File path: core/src/main/java/org/apache/iceberg/DeleteFilesTable.java
##########
@@ -0,0 +1,126 @@
+    DeleteFilesTableScan(TableOperations ops, Table table, Schema fileSchema) {
+      super(ops, table, fileSchema, MetadataTableType.DELETE_FILES);
+      this.fileSchema = fileSchema;
+    }
+
+    private DeleteFilesTableScan(TableOperations ops, Table table, Schema schema, Schema fileSchema,
+                                 TableScanContext context) {
+      super(ops, table, schema, fileSchema, context, MetadataTableType.DELETE_FILES);
+      this.fileSchema = fileSchema;
+    }
+
+    @Override
+    protected TableScan newRefinedScan(TableOperations ops, Table table, Schema schema, TableScanContext context) {
+      return new DeleteFilesTableScan(ops, table, schema, fileSchema, context);
+    }
+
+    @Override
+    protected CloseableIterable<FileScanTask> planFiles(

Review comment:
   If we are to reuse `ManifestReadTask`, do you think we can move `planFiles` to the parent class as well? I guess we could just expose an abstract `manifests(args)` method and share the rest.
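
   Roughly, the shared piece could look like this (sketch only: `manifests(Snapshot)` is a placeholder name, and it assumes the shared `ManifestReadTask` from the comment below keeps the arguments the current `DeleteManifestReadTask` constructor already takes):

```
abstract static class BaseFilesTableScan extends BaseMetadataTableScan {

  // each subclass only declares which manifests it reads
  protected abstract List<ManifestFile> manifests(Snapshot snapshot);

  @Override
  protected CloseableIterable<FileScanTask> planFiles(
      TableOperations ops, Snapshot snapshot, Expression rowFilter,
      boolean ignoreResiduals, boolean caseSensitive, boolean colStats) {
    CloseableIterable<ManifestFile> filtered = filterManifests(manifests(snapshot), rowFilter, caseSensitive);

    String schemaString = SchemaParser.toJson(schema());
    String specString = PartitionSpecParser.toJson(PartitionSpec.unpartitioned());
    Expression filter = ignoreResiduals ? Expressions.alwaysTrue() : rowFilter;
    ResidualEvaluator residuals = ResidualEvaluator.unpartitioned(filter);

    return CloseableIterable.transform(filtered, manifest ->
        new ManifestReadTask(ops.io(), ops.current().specsById(),
            manifest, schema(), schemaString, specString, residuals));
  }
}

// DeleteFilesTableScan then shrinks to a one-line override
// (DataFilesTableScan would return snapshot.dataManifests() instead):
@Override
protected List<ManifestFile> manifests(Snapshot snapshot) {
  return snapshot.deleteManifests();
}
```
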


##########
File path: core/src/main/java/org/apache/iceberg/DeleteFilesTable.java
##########
@@ -0,0 +1,126 @@
+    @Override
+    protected CloseableIterable<FileScanTask> planFiles(
+        TableOperations ops, Snapshot snapshot, Expression rowFilter,
+        boolean ignoreResiduals, boolean caseSensitive, boolean colStats) {
+      CloseableIterable<ManifestFile> filtered = filterManifests(snapshot.deleteManifests(), rowFilter, caseSensitive);
+
+      String schemaString = SchemaParser.toJson(schema());
+      String specString = PartitionSpecParser.toJson(PartitionSpec.unpartitioned());
+      Expression filter = ignoreResiduals ? Expressions.alwaysTrue() : rowFilter;
+      ResidualEvaluator residuals = ResidualEvaluator.unpartitioned(filter);
+
+      // Data tasks produce the table schema, not the projection schema and projection is done by processing engines.
+      // This data task needs to use the table schema, which may not include a partition schema to avoid having an
+      // empty struct in the schema for unpartitioned tables. Some engines, like Spark, can't handle empty structs in
+      // all cases.
+      return CloseableIterable.transform(filtered, manifest ->
+          new DeleteManifestReadTask(ops.io(), ops.current().specsById(),
+              manifest, schema(), schemaString, specString, residuals));
+    }
+  }
+
+  static class DeleteManifestReadTask extends BaseFileScanTask implements DataTask {

Review comment:
   What about making the existing task for reading manifests a little bit more generic rather than introducing a similar class? We would also reuse it in ALL_DELETE_FILES. Technically, you could move `ManifestReadTask` to `BaseFilesTable`.
```
@Override
public CloseableIterable<StructLike> rows() {
  return CloseableIterable.transform(manifestEntries(), file -> (StructLike) file);
}

private CloseableIterable<? extends ContentFile<?>> manifestEntries() {
  switch (manifest.content()) {
    case DATA:
      return ManifestFiles.read(manifest, io, specsById).project(schema);
    case DELETES:
      return ManifestFiles.readDeleteManifest(manifest, io, specsById).project(schema);
    default:
      throw new IllegalArgumentException("Unsupported manifest content type:" + manifest.content());
  }
}
```


##########
File path: core/src/main/java/org/apache/iceberg/BaseFilesTable.java
##########
@@ -0,0 +1,99 @@
+  abstract static class BaseFilesTableScan extends BaseMetadataTableScan {
+    private final Schema fileSchema;
+    private final MetadataTableType type;
+
+    protected BaseFilesTableScan(TableOperations ops, Table table, Schema fileSchema, MetadataTableType type) {
+      super(ops, table, fileSchema);
+      this.fileSchema = fileSchema;
+      this.type = type;
+    }
+
+    protected BaseFilesTableScan(TableOperations ops, Table table, Schema schema, Schema fileSchema,
+                                 TableScanContext context, MetadataTableType type) {

Review comment:
   nit: formatting


##########
File path: core/src/main/java/org/apache/iceberg/BaseFilesTable.java
##########
@@ -0,0 +1,99 @@
+    @Override
+    public TableScan appendsBetween(long fromSnapshotId, long toSnapshotId) {
+      throw new UnsupportedOperationException(
+          String.format("Cannot incrementally scan table of type %s", type.name()));
+    }
+
+    @Override
+    public TableScan appendsAfter(long fromSnapshotId) {
+      throw new UnsupportedOperationException(
+          String.format("Cannot incrementally scan table of type %s", type.name()));
+    }
+
+    protected CloseableIterable<ManifestFile> filterManifests(List<ManifestFile> manifests,
+                                                              Expression rowFilter,
+                                                              boolean caseSensitive) {
+      CloseableIterable<ManifestFile> manifestIterable = CloseableIterable.withNoopClose(manifests);
+
+      // use an inclusive projection to remove the partition name prefix and filter out any non-partition expressions
+      Expression partitionFilter = Projections
+          .inclusive(
+              transformSpec(fileSchema, table().spec(), PARTITION_FIELD_PREFIX),
+              caseSensitive)
+          .project(rowFilter);
+
+      ManifestEvaluator manifestEval = ManifestEvaluator.forPartitionFilter(

Review comment:
   It is not related to this PR, but this logic may not be correct. We always pass the current spec; instead, we should use the spec to which each particular manifest belongs. We should create an issue and follow up.
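
   For the follow-up, something along these lines could work (sketch only, names are illustrative; it assumes we look up each manifest's spec via `manifest.partitionSpecId()` and cache one evaluator per spec):

```
// inside filterManifests: build one evaluator per partition spec and
// evaluate every manifest against the spec it was written with
Map<Integer, ManifestEvaluator> evalCache = Maps.newHashMap();  // Maps from relocated Guava

return CloseableIterable.filter(manifestIterable, manifest -> {
  PartitionSpec spec = table().specs().get(manifest.partitionSpecId());
  ManifestEvaluator eval = evalCache.computeIfAbsent(manifest.partitionSpecId(), specId -> {
    Expression partitionFilter = Projections
        .inclusive(transformSpec(fileSchema, spec, PARTITION_FIELD_PREFIX), caseSensitive)
        .project(rowFilter);
    return ManifestEvaluator.forPartitionFilter(partitionFilter, spec, caseSensitive);
  });
  return eval.eval(manifest);
});
```
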
##########
File path: core/src/main/java/org/apache/iceberg/DeleteFilesTable.java
##########
@@ -0,0 +1,126 @@
+    @Override
+    protected CloseableIterable<FileScanTask> planFiles(

Review comment:
   In the future, we may want to migrate `AllDataFilesTable` to use the same parent class.


##########
File path: core/src/main/java/org/apache/iceberg/DeleteFilesTable.java
##########
@@ -0,0 +1,126 @@
+    private DeleteFilesTableScan(TableOperations ops, Table table, Schema schema, Schema fileSchema,
+                                 TableScanContext context) {

Review comment:
   nit: formatting


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
