rdblue commented on a change in pull request #3039:
URL: https://github.com/apache/iceberg/pull/3039#discussion_r711819113
##########
File path: spark3/src/test/java/org/apache/iceberg/spark/source/TestStructuredStreamingRead3.java
##########
@@ -165,6 +165,84 @@ public void testReadStreamOnIcebergTableWithMultipleSnapshots() throws Exception
     Assertions.assertThat(actual).containsExactlyInAnyOrderElementsOf(Iterables.concat(expected));
   }

+  @SuppressWarnings("unchecked")
+  @Test
+  public void testReadingStreamFromTimestamp() throws Exception {
+    List<SimpleRecord> dataBeforeTimestamp = Lists.newArrayList(
+        new SimpleRecord(-2, "minustwo"),
+        new SimpleRecord(-1, "minusone"),
+        new SimpleRecord(0, "zero"));
+    appendData(dataBeforeTimestamp, tableIdentifier, "parquet");
+
+    table.refresh();
+    long streamStartTimestamp = table.currentSnapshot().timestampMillis() + 1;
+
+    List<List<SimpleRecord>> expected = TEST_DATA_MULTIPLE_SNAPSHOTS;
+    appendDataAsMultipleSnapshots(expected, tableIdentifier);
+
+    table.refresh();
+
+    Dataset<Row> df = spark.readStream()
+        .format("iceberg")
+        .option(SparkReadOptions.STREAM_FROM_TIMESTAMP, Long.toString(streamStartTimestamp))
+        .load(tableIdentifier);
+    List<SimpleRecord> actual = processAvailable(df);
+
+    Assertions.assertThat(actual).containsExactlyInAnyOrderElementsOf(Iterables.concat(expected));
+  }
+
+  @SuppressWarnings("unchecked")
+  @Test
+  public void testReadingStreamFromTimestampStartWithExistingTimestamp() throws Exception {
+    List<SimpleRecord> dataBeforeTimestamp = Lists.newArrayList(
+        new SimpleRecord(-2, "minustwo"),
+        new SimpleRecord(-1, "minusone"),
+        new SimpleRecord(0, "zero"));
+    appendData(dataBeforeTimestamp, tableIdentifier, "parquet");
+
+    table.refresh();
+    List<List<SimpleRecord>> expected = TEST_DATA_MULTIPLE_SNAPSHOTS;
+
+    // Append the first batch of expected data
+    appendData(expected.get(0), tableIdentifier, "parquet");
+    table.refresh();
+    long streamStartTimestamp = table.currentSnapshot().timestampMillis();
+
+    // Append the rest of the expected data
+    for (int i = 1; i < expected.size(); i++) {
+      appendData(expected.get(i), tableIdentifier, "parquet");
+    }
+
+    table.refresh();
+    Dataset<Row> df = spark.readStream()
+        .format("iceberg")
+        .option(SparkReadOptions.STREAM_FROM_TIMESTAMP, Long.toString(streamStartTimestamp))
+        .load(tableIdentifier);
+    List<SimpleRecord> actual = processAvailable(df);
+
+    Assertions.assertThat(actual).containsExactlyInAnyOrderElementsOf(Iterables.concat(expected));
+  }
+
+  @SuppressWarnings("unchecked")
+  @Test
+  public void testReadingStreamFromTimestampGreaterThanLatestSnapshotTime() throws Exception {
Review comment:
This needs to test new data after the stream starts as well. Currently
it validates that no processing happens if the timestamp is newer than the
current snapshot, but we also need the stream to continue processing new data.
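For example, the tail of this test could append data after the first (empty) micro-batch and assert it flows through. A rough sketch, not tested: it assumes the stream was started with a timestamp only slightly ahead of the latest snapshot (so later commits land after it), and that the `processAvailable` helper can be called a second time on the same streaming Dataset to drain micro-batches committed after the first call:

    // First, verify that nothing is processed while no snapshot is newer
    // than the start timestamp.
    Assertions.assertThat(processAvailable(df)).isEmpty();

    // Snapshots committed after the stream has started should still be picked up.
    List<List<SimpleRecord>> expected = TEST_DATA_MULTIPLE_SNAPSHOTS;
    appendDataAsMultipleSnapshots(expected, tableIdentifier);

    // Assumes a second processAvailable(df) call drains the newly available
    // micro-batches.
    List<SimpleRecord> actual = processAvailable(df);
    Assertions.assertThat(actual).containsExactlyInAnyOrderElementsOf(Iterables.concat(expected));

That way the test covers both the "empty until the timestamp is reached" behavior and the "keeps consuming new commits" behavior in one run.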