Copilot commented on code in PR #1684:
URL: https://github.com/apache/fluss/pull/1684#discussion_r2343462501
##########
fluss-lake/fluss-lake-iceberg/src/main/java/org/apache/fluss/lake/iceberg/source/IcebergRecordReader.java:
##########
@@ -0,0 +1,136 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.fluss.lake.iceberg.source;
+
+import org.apache.fluss.lake.source.RecordReader;
+import org.apache.fluss.record.ChangeType;
+import org.apache.fluss.record.GenericRecord;
+import org.apache.fluss.record.LogRecord;
+import org.apache.fluss.row.ProjectedRow;
+import org.apache.fluss.utils.CloseableIterator;
+
+import org.apache.iceberg.FileScanTask;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.TableScan;
+import org.apache.iceberg.data.IcebergGenericReader;
+import org.apache.iceberg.data.Record;
+import org.apache.iceberg.io.CloseableIterable;
+import org.apache.iceberg.types.Types;
+
+import javax.annotation.Nullable;
+
+import java.io.IOException;
+import java.time.OffsetDateTime;
+import java.time.temporal.ChronoField;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.IntStream;
+
+import static org.apache.fluss.metadata.TableDescriptor.OFFSET_COLUMN_NAME;
+import static org.apache.fluss.metadata.TableDescriptor.TIMESTAMP_COLUMN_NAME;
+
+/** Iceberg record reader. */
+public class IcebergRecordReader implements RecordReader {
+    protected IcebergRecordAsFlussRecordIterator iterator;
+    protected @Nullable int[][] project;
+    protected Types.StructType struct;
+
+    public IcebergRecordReader(FileScanTask fileScanTask, Table table, @Nullable int[][] project) {
+        TableScan tableScan = table.newScan();
+        if (project != null) {
+            tableScan = applyProject(tableScan, project);
+        }
+        IcebergGenericReader reader = new IcebergGenericReader(tableScan, true);
+        struct = tableScan.schema().asStruct();
+        this.iterator = new IcebergRecordAsFlussRecordIterator(reader.open(fileScanTask), struct);
+    }
+
+    @Override
+    public CloseableIterator<LogRecord> read() throws IOException {
+        return iterator;
+    }
+
+    private TableScan applyProject(TableScan tableScan, int[][] projects) {
+        Types.StructType structType = tableScan.schema().asStruct();
+        List<Types.NestedField> cols = new ArrayList<>(projects.length + 2);
+
+        for (int[] project : projects) {
+            // iceberg field index starts from 1
+            cols.add(structType.field(project[0] + 1));
+        }
+
+        cols.add(structType.field(OFFSET_COLUMN_NAME));
+        cols.add(structType.field(TIMESTAMP_COLUMN_NAME));
+        return tableScan.project(new Schema(cols));
+    }
+
+    /** Iterator for iceberg record as fluss record. */
+    public static class IcebergRecordAsFlussRecordIterator implements CloseableIterator<LogRecord> {
+
+        private final org.apache.iceberg.io.CloseableIterator<Record> icebergRecordIterator;
+
+        private final ProjectedRow projectedRow;
+        private final IcebergRecordAsFlussRow icebergRecordAsFlussRow;
+
+        private final int logOffsetColIndex;
+        private final int timestampColIndex;
+
+        public IcebergRecordAsFlussRecordIterator(
+                CloseableIterable<Record> icebergRecordIterator, Types.StructType struct) {
+            this.icebergRecordIterator = icebergRecordIterator.iterator();
+            this.logOffsetColIndex = struct.fields().indexOf(struct.field(OFFSET_COLUMN_NAME));
+            this.timestampColIndex = struct.fields().indexOf(struct.field(TIMESTAMP_COLUMN_NAME));
+
+            int[] project = IntStream.range(0, struct.fields().size() - 2).toArray();
+            projectedRow = ProjectedRow.from(project);
+            icebergRecordAsFlussRow = new IcebergRecordAsFlussRow();
+        }
+
+        @Override
+        public void close() {
+            try {
+                icebergRecordIterator.close();
+            } catch (Exception e) {
+                throw new RuntimeException("Fail to close iterator.", e);
+            }
+        }
+
+        @Override
+        public boolean hasNext() {
+            return icebergRecordIterator.hasNext();
+        }
+
+        @Override
+        public LogRecord next() {
+            Record icebergRecord = icebergRecordIterator.next();
+            long offset = icebergRecord.get(logOffsetColIndex, Long.class);
+            long timestamp =
+                    icebergRecord
+                            .get(timestampColIndex, OffsetDateTime.class)
+                            .getLong(ChronoField.MILLI_OF_SECOND);

Review Comment:
Using MILLI_OF_SECOND will only return milliseconds within the current second (0-999), not the full timestamp in milliseconds. Should use toEpochMilli() instead to get the complete timestamp.
```suggestion
                            .toInstant().toEpochMilli();
```
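For illustration, a minimal runnable sketch of the difference the comment describes (the timestamp value and class name are made up for the example, not taken from the PR):

```java
import java.time.OffsetDateTime;
import java.time.temporal.ChronoField;

public class TimestampFieldExample {
    public static void main(String[] args) {
        OffsetDateTime ts = OffsetDateTime.parse("2024-01-02T03:04:05.678Z");
        // MILLI_OF_SECOND only exposes the millisecond-of-second field, i.e. a value in 0-999
        System.out.println(ts.getLong(ChronoField.MILLI_OF_SECOND)); // prints 678
        // toInstant().toEpochMilli() yields the full timestamp in milliseconds since the epoch
        System.out.println(ts.toInstant().toEpochMilli()); // prints 1704164645678
    }
}
```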
##########
fluss-lake/fluss-lake-iceberg/src/test/java/org/apache/fluss/lake/iceberg/testutils/FlinkIcebergTieringTestBase.java:
##########
@@ -461,4 +462,32 @@ protected Map<String, List<InternalRow>> writeRowsIntoPartitionedTable(
         writeRows(tablePath, rows, !tableDescriptor.hasPrimaryKey());
         return writtenRowsByPartition;
     }
+
+    protected void waitUntilBucketSynced(
+            TablePath tablePath, long tableId, int bucketCount, boolean isPartition) {
+        if (isPartition) {
+            Map<Long, String> partitionById = waitUntilPartitions(tablePath);
+            for (Long partitionId : partitionById.keySet()) {
+                for (int i = 0; i < bucketCount; i++) {
+                    TableBucket tableBucket = new TableBucket(tableId, partitionId, i);
+                    waitUntilBucketSynced(tableBucket);
+                }
+            }
+        } else {
+            for (int i = 0; i < bucketCount; i++) {
+                TableBucket tableBucket = new TableBucket(tableId, i);
+                waitUntilBucketSynced(tableBucket);
+            }
+        }
+    }
+
+    protected void waitUntilBucketSynced(TableBucket tb) {
+        waitUntil(
+                () -> {
+                    Replica replica = getLeaderReplica(tb);
+                    return replica.getLogTablet().getLakeTableSnapshotId() >= 0;
+                },
+                Duration.ofMinutes(2),
+                "bucket " + tb + "not synced");

Review Comment:
Missing space after bucket identifier. Should be 'bucket ' + tb + ' not synced'.
```suggestion
                "bucket " + tb + " not synced");
```
##########
fluss-lake/fluss-lake-iceberg/src/test/java/org/apache/fluss/lake/iceberg/source/IcebergSplitSerializerTest.java:
##########
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+package org.apache.fluss.lake.iceberg.source;
+
+import org.apache.fluss.lake.source.LakeSource;
+import org.apache.fluss.metadata.TablePath;
+
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.data.GenericRecord;
+import org.apache.iceberg.types.Types;
+import org.junit.jupiter.api.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.apache.iceberg.types.Types.NestedField.optional;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+/** Test case for {@link IcebergSplitSerializer}. */
+class IcebergSplitSerializerTest extends IcebergSourceTestBase {
+    private final IcebergSplitSerializer serializer = new IcebergSplitSerializer();
+
+    @Test
+    void testSerializeAndDeserialize() throws Exception {
+        // prepare paimon table

Review Comment:
Comment refers to 'paimon table' but this test is for Iceberg. Should be '// prepare iceberg table'.
```suggestion
        // prepare iceberg table
```

--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
