nastra commented on code in PR #7483:
URL: https://github.com/apache/iceberg/pull/7483#discussion_r1181496355
##########
spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestDataSourceOptions.java:
##########
@@ -242,50 +242,50 @@ public void testIncrementalScanOptions() throws IOException {
List<Long> snapshotIds = SnapshotUtil.currentAncestorIds(table);
// start-snapshot-id and snapshot-id are both configured.
- AssertHelpers.assertThrows(
- "Check both start-snapshot-id and snapshot-id are configured",
- IllegalArgumentException.class,
- "Cannot set start-snapshot-id and end-snapshot-id for incremental
scans",
- () -> {
- spark
- .read()
- .format("iceberg")
- .option("snapshot-id", snapshotIds.get(3).toString())
- .option("start-snapshot-id", snapshotIds.get(3).toString())
- .load(tableLocation)
- .explain();
- });
+ Assertions.assertThatThrownBy(
+ () -> {
Review Comment:
it would be good to remove the `{ }` around the actual call (here and in the blocks below), so the lambda wraps the call directly as a single expression
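For example (untested sketch; the `isInstanceOf`/`hasMessageContaining` chain is assumed from the old `assertThrows` arguments, since the hunk is cut off here):
```java
Assertions.assertThatThrownBy(
        () ->
            spark
                .read()
                .format("iceberg")
                .option("snapshot-id", snapshotIds.get(3).toString())
                .option("start-snapshot-id", snapshotIds.get(3).toString())
                .load(tableLocation)
                .explain())
    .isInstanceOf(IllegalArgumentException.class)
    .hasMessageContaining(
        "Cannot set start-snapshot-id and end-snapshot-id for incremental scans");
```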
##########
spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWriterV2.java:
##########
@@ -76,18 +76,17 @@ public void testMergeSchemaFailsWithoutWriterOption() throws Exception {
// this has a different error message than the case without accept-any-schema because it uses
// Iceberg checks
- AssertHelpers.assertThrows(
- "Should fail when merge-schema is not enabled on the writer",
- IllegalArgumentException.class,
- "Field new_col not found in source schema",
- () -> {
- try {
- threeColDF.writeTo(tableName).append();
- } catch (NoSuchTableException e) {
- // needed because append has checked exceptions
- throw new RuntimeException(e);
- }
- });
+ Assertions.assertThatThrownBy(
+ () -> {
+ try {
Review Comment:
do we need this try-catch?
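AssertJ's `ThrowingCallable.call()` declares `throws Throwable`, so the checked `NoSuchTableException` no longer needs to be wrapped. Roughly (untested sketch, assertion chain taken from the old `assertThrows` arguments):
```java
// the lambda may throw checked exceptions directly
Assertions.assertThatThrownBy(() -> threeColDF.writeTo(tableName).append())
    .isInstanceOf(IllegalArgumentException.class)
    .hasMessageContaining("Field new_col not found in source schema");
```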
##########
spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWriterV2.java:
##########
@@ -111,18 +110,18 @@ public void testMergeSchemaWithoutAcceptAnySchema() throws Exception {
"{ \"id\": 3, \"data\": \"c\", \"new_col\": 12.06 }",
"{ \"id\": 4, \"data\": \"d\", \"new_col\": 14.41 }");
- AssertHelpers.assertThrows(
- "Should fail when accept-any-schema is not enabled on the table",
- AnalysisException.class,
- "too many data columns",
- () -> {
- try {
- threeColDF.writeTo(tableName).option("merge-schema", "true").append();
- } catch (NoSuchTableException e) {
- // needed because append has checked exceptions
- throw new RuntimeException(e);
- }
- });
+ Assertions.assertThatThrownBy(
+ () -> {
+ try {
Review Comment:
do we need this try-catch block?
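Same as above; `assertThatThrownBy` already accepts a throwing lambda, so this should reduce to something like (untested sketch):
```java
Assertions.assertThatThrownBy(
        () -> threeColDF.writeTo(tableName).option("merge-schema", "true").append())
    .isInstanceOf(AnalysisException.class)
    .hasMessageContaining("too many data columns");
```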
##########
spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestRequiredDistributionAndOrdering.java:
##########
@@ -162,20 +162,22 @@ public void testDisabledDistributionAndOrdering() {
Dataset<Row> inputDF = ds.coalesce(1).sortWithinPartitions("c1");
// should fail if ordering is disabled
- AssertHelpers.assertThrowsCause(
- "Should reject writes without ordering",
- IllegalStateException.class,
- "Incoming records violate the writer assumption",
- () -> {
- try {
- inputDF
- .writeTo(tableName)
- .option(SparkWriteOptions.USE_TABLE_DISTRIBUTION_AND_ORDERING, "false")
- .append();
- } catch (NoSuchTableException e) {
- throw new RuntimeException(e);
- }
- });
+ Assertions.assertThatThrownBy(
+ () -> {
+ try {
Review Comment:
do we need the try-catch block here?
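One thing to watch here: the old helper was `assertThrowsCause`, so the type/message checks belong on the cause, not on the thrown exception itself. A rough sketch (untested):
```java
Assertions.assertThatThrownBy(
        () ->
            inputDF
                .writeTo(tableName)
                .option(SparkWriteOptions.USE_TABLE_DISTRIBUTION_AND_ORDERING, "false")
                .append())
    // assertThrowsCause matched against the cause, hence the cause-based checks
    .hasCauseInstanceOf(IllegalStateException.class)
    .hasStackTraceContaining("Incoming records violate the writer assumption");
```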