This is an automated email from the ASF dual-hosted git repository.
etudenhoefner pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/iceberg.git
The following commit(s) were added to refs/heads/master by this push:
new 3a9d6508a6 Core: Switch tests to JUnit5 in rest/hadoop packages (#7861)
3a9d6508a6 is described below
commit 3a9d6508a646ed70b0714a9663f6738bf18c134a
Author: Song Minseok <[email protected]>
AuthorDate: Fri Jun 23 14:34:11 2023 +0900
Core: Switch tests to JUnit5 in rest/hadoop packages (#7861)
---
.../apache/iceberg/hadoop/HadoopFileIOTest.java | 53 +++--
.../apache/iceberg/hadoop/HadoopTableTestBase.java | 16 +-
.../apache/iceberg/hadoop/TestCachingCatalog.java | 73 +++---
.../iceberg/hadoop/TestCatalogUtilDropTable.java | 50 ++--
.../apache/iceberg/hadoop/TestHadoopCatalog.java | 223 +++++++++---------
.../apache/iceberg/hadoop/TestHadoopCommits.java | 256 ++++++++++-----------
.../apache/iceberg/hadoop/TestHadoopTables.java | 68 +++---
.../org/apache/iceberg/hadoop/TestStaticTable.java | 37 ++-
.../iceberg/hadoop/TestTableSerialization.java | 12 +-
.../iceberg/rest/RequestResponseTestBase.java | 9 +-
.../org/apache/iceberg/rest/TestHTTPClient.java | 18 +-
.../org/apache/iceberg/rest/TestRESTCatalog.java | 56 ++---
.../java/org/apache/iceberg/rest/TestRESTUtil.java | 2 +-
.../org/apache/iceberg/rest/TestResourcePaths.java | 71 +++---
14 files changed, 471 insertions(+), 473 deletions(-)
diff --git a/core/src/test/java/org/apache/iceberg/hadoop/HadoopFileIOTest.java
b/core/src/test/java/org/apache/iceberg/hadoop/HadoopFileIOTest.java
index 818dfaabc6..636c94069d 100644
--- a/core/src/test/java/org/apache/iceberg/hadoop/HadoopFileIOTest.java
+++ b/core/src/test/java/org/apache/iceberg/hadoop/HadoopFileIOTest.java
@@ -18,9 +18,6 @@
*/
package org.apache.iceberg.hadoop;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-
import java.io.File;
import java.io.IOException;
import java.io.UncheckedIOException;
@@ -38,7 +35,7 @@ import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Streams;
-import org.junit.Assert;
+import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
@@ -72,14 +69,15 @@ public class HadoopFileIOTest {
Path scalePath = new Path(parent, Integer.toString(scale));
createRandomFiles(scalePath, scale);
- assertEquals(
- (long) scale,
-
Streams.stream(hadoopFileIO.listPrefix(scalePath.toUri().toString())).count());
+ Assertions.assertThat(
+
Streams.stream(hadoopFileIO.listPrefix(scalePath.toUri().toString())).count())
+ .isEqualTo((long) scale);
});
long totalFiles = scaleSizes.stream().mapToLong(Integer::longValue).sum();
- assertEquals(
- totalFiles,
Streams.stream(hadoopFileIO.listPrefix(parent.toUri().toString())).count());
+ Assertions.assertThat(
+
Streams.stream(hadoopFileIO.listPrefix(parent.toUri().toString())).count())
+ .isEqualTo(totalFiles);
}
@Test
@@ -89,10 +87,11 @@ public class HadoopFileIOTest {
fs.createNewFile(randomFilePath);
// check existence of the created file
-
Assert.assertTrue(hadoopFileIO.newInputFile(randomFilePath.toUri().toString()).exists());
-
+
Assertions.assertThat(hadoopFileIO.newInputFile(randomFilePath.toUri().toString()).exists())
+ .isTrue();
fs.delete(randomFilePath, false);
-
Assert.assertFalse(hadoopFileIO.newInputFile(randomFilePath.toUri().toString()).exists());
+
Assertions.assertThat(hadoopFileIO.newInputFile(randomFilePath.toUri().toString()).exists())
+ .isFalse();
}
@Test
@@ -111,16 +110,18 @@ public class HadoopFileIOTest {
hadoopFileIO.deletePrefix(scalePath.toUri().toString());
// Hadoop filesystem will throw if the path does not exist
- assertThrows(
- UncheckedIOException.class,
- () ->
hadoopFileIO.listPrefix(scalePath.toUri().toString()).iterator());
+ Assertions.assertThatThrownBy(
+ () ->
hadoopFileIO.listPrefix(scalePath.toUri().toString()).iterator())
+ .isInstanceOf(UncheckedIOException.class)
+ .hasMessageContaining("java.io.FileNotFoundException");
});
hadoopFileIO.deletePrefix(parent.toUri().toString());
// Hadoop filesystem will throw if the path does not exist
- assertThrows(
- UncheckedIOException.class,
- () -> hadoopFileIO.listPrefix(parent.toUri().toString()).iterator());
+ Assertions.assertThatThrownBy(
+ () ->
hadoopFileIO.listPrefix(parent.toUri().toString()).iterator())
+ .isInstanceOf(UncheckedIOException.class)
+ .hasMessageContaining("java.io.FileNotFoundException");
}
@Test
@@ -130,17 +131,17 @@ public class HadoopFileIOTest {
hadoopFileIO.deleteFiles(
filesCreated.stream().map(Path::toString).collect(Collectors.toList()));
filesCreated.forEach(
- file ->
Assert.assertFalse(hadoopFileIO.newInputFile(file.toString()).exists()));
+ file ->
+
Assertions.assertThat(hadoopFileIO.newInputFile(file.toString()).exists()).isFalse());
}
@Test
public void testDeleteFilesErrorHandling() {
List<String> filesCreated =
random.ints(2).mapToObj(x -> "fakefsnotreal://file-" +
x).collect(Collectors.toList());
- Assert.assertThrows(
- "Should throw a BulkDeletionFailure Exceptions when files can't be
deleted",
- BulkDeletionFailureException.class,
- () -> hadoopFileIO.deleteFiles(filesCreated));
+ Assertions.assertThatThrownBy(() -> hadoopFileIO.deleteFiles(filesCreated))
+ .isInstanceOf(BulkDeletionFailureException.class)
+ .hasMessage("Failed to delete 2 files");
}
@Test
@@ -151,7 +152,8 @@ public class HadoopFileIOTest {
testHadoopFileIO.initialize(ImmutableMap.of("k1", "v1"));
FileIO roundTripSerializedFileIO =
TestHelpers.KryoHelpers.roundTripSerialize(testHadoopFileIO);
- Assert.assertEquals(testHadoopFileIO.properties(),
roundTripSerializedFileIO.properties());
+ Assertions.assertThat(roundTripSerializedFileIO.properties())
+ .isEqualTo(testHadoopFileIO.properties());
}
@Test
@@ -162,7 +164,8 @@ public class HadoopFileIOTest {
testHadoopFileIO.initialize(ImmutableMap.of("k1", "v1"));
FileIO roundTripSerializedFileIO =
TestHelpers.roundTripSerialize(testHadoopFileIO);
- Assert.assertEquals(testHadoopFileIO.properties(),
roundTripSerializedFileIO.properties());
+ Assertions.assertThat(roundTripSerializedFileIO.properties())
+ .isEqualTo(testHadoopFileIO.properties());
}
private List<Path> createRandomFiles(Path parent, int count) {
diff --git
a/core/src/test/java/org/apache/iceberg/hadoop/HadoopTableTestBase.java
b/core/src/test/java/org/apache/iceberg/hadoop/HadoopTableTestBase.java
index 5774fbd432..b8f7dcb80e 100644
--- a/core/src/test/java/org/apache/iceberg/hadoop/HadoopTableTestBase.java
+++ b/core/src/test/java/org/apache/iceberg/hadoop/HadoopTableTestBase.java
@@ -48,9 +48,8 @@ import
org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.io.Files;
import org.apache.iceberg.types.Types;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.io.TempDir;
public class HadoopTableTestBase {
// Schema passed to create tables
@@ -113,19 +112,16 @@ public class HadoopTableTestBase {
.withRecordCount(2) // needs at least one record or else metrics
will filter it out
.build();
- @Rule public TemporaryFolder temp = new TemporaryFolder();
+ @TempDir File tempDir;
+ @TempDir File tableDir;
- File tableDir = null;
String tableLocation = null;
File metadataDir = null;
File versionHintFile = null;
Table table = null;
- @Before
+ @BeforeEach
public void setupTable() throws Exception {
- this.tableDir = temp.newFolder();
- tableDir.delete(); // created by table create
-
this.tableLocation = tableDir.toURI().toString();
this.metadataDir = new File(tableDir, "metadata");
this.versionHintFile = new File(metadataDir, "version-hint.text");
@@ -197,7 +193,7 @@ public class HadoopTableTestBase {
"hadoop",
ImmutableMap.<String, String>builder()
.putAll(catalogProperties)
- .put(CatalogProperties.WAREHOUSE_LOCATION,
temp.newFolder().getAbsolutePath())
+ .put(CatalogProperties.WAREHOUSE_LOCATION,
tempDir.getAbsolutePath())
.buildOrThrow());
return hadoopCatalog;
}
diff --git
a/core/src/test/java/org/apache/iceberg/hadoop/TestCachingCatalog.java
b/core/src/test/java/org/apache/iceberg/hadoop/TestCachingCatalog.java
index 5e323a5ffe..31b41434ec 100644
--- a/core/src/test/java/org/apache/iceberg/hadoop/TestCachingCatalog.java
+++ b/core/src/test/java/org/apache/iceberg/hadoop/TestCachingCatalog.java
@@ -42,10 +42,9 @@ import
org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.util.FakeTicker;
import org.assertj.core.api.Assertions;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
public class TestCachingCatalog extends HadoopTableTestBase {
@@ -54,12 +53,12 @@ public class TestCachingCatalog extends HadoopTableTestBase
{
private FakeTicker ticker;
- @Before
+ @BeforeEach
public void beforeEach() {
this.ticker = new FakeTicker();
}
- @After
+ @AfterEach
public void afterEach() {
this.ticker = null;
}
@@ -87,15 +86,15 @@ public class TestCachingCatalog extends HadoopTableTestBase
{
Table manifestsMetaTable2 = catalog.loadTable(manifestsMetaTableIdent);
// metadata tables are cached
- Assert.assertEquals(filesMetaTable2, filesMetaTable);
- Assert.assertEquals(manifestsMetaTable2, manifestsMetaTable);
+ Assertions.assertThat(filesMetaTable2).isEqualTo(filesMetaTable);
+ Assertions.assertThat(manifestsMetaTable2).isEqualTo(manifestsMetaTable);
// the current snapshot of origin table is updated after committing
- Assert.assertNotEquals(table.currentSnapshot(), oldSnapshot);
+ Assertions.assertThat(table.currentSnapshot()).isNotEqualTo(oldSnapshot);
// underlying table operation in metadata tables are shared with the
origin table
- Assert.assertEquals(filesMetaTable2.currentSnapshot(),
table.currentSnapshot());
- Assert.assertEquals(manifestsMetaTable2.currentSnapshot(),
table.currentSnapshot());
+
Assertions.assertThat(filesMetaTable2.currentSnapshot()).isEqualTo(table.currentSnapshot());
+
Assertions.assertThat(manifestsMetaTable2.currentSnapshot()).isEqualTo(table.currentSnapshot());
}
@Test
@@ -129,18 +128,22 @@ public class TestCachingCatalog extends
HadoopTableTestBase {
// remember the new snapshot
Snapshot newSnapshot = table.currentSnapshot();
- Assert.assertNotEquals("Snapshots must be different", oldSnapshot,
newSnapshot);
+ Assertions.assertThat(newSnapshot).as("Snapshots must be
different").isNotEqualTo(oldSnapshot);
// validate metadata tables were correctly invalidated
for (MetadataTableType type : MetadataTableType.values()) {
TableIdentifier metadataIdent1 = TableIdentifier.parse(tableIdent + "."
+ type.name());
Table metadataTable1 = catalog.loadTable(metadataIdent1);
- Assert.assertEquals("Snapshot must be new", newSnapshot,
metadataTable1.currentSnapshot());
+ Assertions.assertThat(metadataTable1.currentSnapshot())
+ .as("Snapshot must be new")
+ .isEqualTo(newSnapshot);
TableIdentifier metadataIdent2 =
TableIdentifier.parse(tableIdent + "." +
type.name().toLowerCase(Locale.ROOT));
Table metadataTable2 = catalog.loadTable(metadataIdent2);
- Assert.assertEquals("Snapshot must be new", newSnapshot,
metadataTable2.currentSnapshot());
+ Assertions.assertThat(metadataTable2.currentSnapshot())
+ .as("Snapshot must be new")
+ .isEqualTo(newSnapshot);
}
}
@@ -151,13 +154,14 @@ public class TestCachingCatalog extends
HadoopTableTestBase {
catalog.createTable(tableIdent, SCHEMA, SPEC, ImmutableMap.of("key2",
"value2"));
Table table = catalog.loadTable(tableIdent);
- Assert.assertEquals("Name must match", "hadoop.db.ns1.ns2.tbl",
table.name());
+ Assertions.assertThat(table.name()).as("Name must
match").isEqualTo("hadoop.db.ns1.ns2.tbl");
TableIdentifier snapshotsTableIdent =
TableIdentifier.of("db", "ns1", "ns2", "tbl", "snapshots");
Table snapshotsTable = catalog.loadTable(snapshotsTableIdent);
- Assert.assertEquals(
- "Name must match", "hadoop.db.ns1.ns2.tbl.snapshots",
snapshotsTable.name());
+ Assertions.assertThat(snapshotsTable.name())
+ .as("Name must match")
+ .isEqualTo("hadoop.db.ns1.ns2.tbl.snapshots");
}
@Test
@@ -185,10 +189,9 @@ public class TestCachingCatalog extends
HadoopTableTestBase {
ticker.advance(HALF_OF_EXPIRATION.plus(Duration.ofSeconds(10)));
Assertions.assertThat(catalog.cache().asMap()).doesNotContainKey(tableIdent);
- Assert.assertNotSame(
- "CachingCatalog should return a new instance after expiration",
- table,
- catalog.loadTable(tableIdent));
+ Assertions.assertThat(catalog.loadTable(tableIdent))
+ .as("CachingCatalog should return a new instance after expiration")
+ .isNotSameAs(table);
}
@Test
@@ -267,10 +270,9 @@ public class TestCachingCatalog extends
HadoopTableTestBase {
.isNotEmpty()
.allMatch(age -> age.isPresent() && age.get().equals(Duration.ZERO));
- Assert.assertEquals(
- "Loading a non-cached metadata table should refresh the main table's
age",
- Optional.of(EXPIRATION_TTL),
- catalog.remainingAgeFor(tableIdent));
+ Assertions.assertThat(catalog.remainingAgeFor(tableIdent))
+ .as("Loading a non-cached metadata table should refresh the main
table's age")
+ .isEqualTo(Optional.of(EXPIRATION_TTL));
// Move time forward and access already cached metadata tables.
ticker.advance(HALF_OF_EXPIRATION);
@@ -279,10 +281,9 @@ public class TestCachingCatalog extends
HadoopTableTestBase {
.isNotEmpty()
.allMatch(age -> age.isPresent() && age.get().equals(Duration.ZERO));
- Assert.assertEquals(
- "Accessing a cached metadata table should not affect the main table's
age",
- Optional.of(HALF_OF_EXPIRATION),
- catalog.remainingAgeFor(tableIdent));
+ Assertions.assertThat(catalog.remainingAgeFor(tableIdent))
+ .as("Accessing a cached metadata table should not affect the main
table's age")
+ .isEqualTo(Optional.of(HALF_OF_EXPIRATION));
// Move time forward so the data table drops.
ticker.advance(HALF_OF_EXPIRATION);
@@ -291,9 +292,10 @@ public class TestCachingCatalog extends
HadoopTableTestBase {
Arrays.stream(metadataTables(tableIdent))
.forEach(
metadataTable ->
- Assert.assertFalse(
- "When a data table expires, its metadata tables should
expire regardless of age",
- catalog.cache().asMap().containsKey(metadataTable)));
+ Assertions.assertThat(catalog.cache().asMap())
+ .as(
+ "When a data table expires, its metadata tables should
expire regardless of age")
+ .doesNotContainKeys(metadataTable));
}
@Test
@@ -357,9 +359,10 @@ public class TestCachingCatalog extends
HadoopTableTestBase {
Duration.ofMillis(CatalogProperties.CACHE_EXPIRATION_INTERVAL_MS_OFF),
ticker);
- Assert.assertFalse(
- "When a negative value is used as the expiration interval, the cache
should not expire entries based on a TTL",
- catalog.isCacheExpirationEnabled());
+ Assertions.assertThat(catalog.isCacheExpirationEnabled())
+ .as(
+ "When a negative value is used as the expiration interval, the
cache should not expire entries based on a TTL")
+ .isFalse();
}
@Test
diff --git
a/core/src/test/java/org/apache/iceberg/hadoop/TestCatalogUtilDropTable.java
b/core/src/test/java/org/apache/iceberg/hadoop/TestCatalogUtilDropTable.java
index c61bb3da63..478ac3a8c2 100644
--- a/core/src/test/java/org/apache/iceberg/hadoop/TestCatalogUtilDropTable.java
+++ b/core/src/test/java/org/apache/iceberg/hadoop/TestCatalogUtilDropTable.java
@@ -29,8 +29,8 @@ import org.apache.iceberg.TableMetadata;
import org.apache.iceberg.TableProperties;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.relocated.com.google.common.collect.Sets;
-import org.junit.Assert;
-import org.junit.Test;
+import org.assertj.core.api.Assertions;
+import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.ArgumentMatchers;
import org.mockito.Mockito;
@@ -49,8 +49,8 @@ public class TestCatalogUtilDropTable extends
HadoopTableTestBase {
Set<String> manifestLocations = manifestLocations(snapshotSet, table.io());
Set<String> dataLocations = dataLocations(snapshotSet, table.io());
Set<String> metadataLocations = metadataLocations(tableMetadata);
- Assert.assertEquals("should have 2 manifest lists", 2,
manifestListLocations.size());
- Assert.assertEquals("should have 3 metadata locations", 3,
metadataLocations.size());
+ Assertions.assertThat(manifestListLocations).as("should have 2 manifest
lists").hasSize(2);
+ Assertions.assertThat(metadataLocations).as("should have 3 metadata
locations").hasSize(3);
FileIO fileIO = Mockito.mock(FileIO.class);
Mockito.when(fileIO.newInputFile(Mockito.anyString()))
@@ -73,15 +73,18 @@ public class TestCatalogUtilDropTable extends
HadoopTableTestBase {
.deleteFile(argumentCaptor.capture());
List<String> deletedPaths = argumentCaptor.getAllValues();
- Assert.assertTrue(
- "should contain all created manifest lists",
- deletedPaths.containsAll(manifestListLocations));
- Assert.assertTrue(
- "should contain all created manifests",
deletedPaths.containsAll(manifestLocations));
- Assert.assertTrue("should contain all created data",
deletedPaths.containsAll(dataLocations));
- Assert.assertTrue(
- "should contain all created metadata locations",
- deletedPaths.containsAll(metadataLocations));
+ Assertions.assertThat(deletedPaths)
+ .as("should contain all created manifest lists")
+ .containsAll(manifestListLocations);
+ Assertions.assertThat(deletedPaths)
+ .as("should contain all created manifests")
+ .containsAll(manifestLocations);
+ Assertions.assertThat(deletedPaths)
+ .as("should contain all created data")
+ .containsAll(dataLocations);
+ Assertions.assertThat(deletedPaths)
+ .as("should contain all created metadata locations")
+ .containsAll(metadataLocations);
}
@Test
@@ -124,8 +127,8 @@ public class TestCatalogUtilDropTable extends
HadoopTableTestBase {
Set<String> manifestListLocations = manifestListLocations(snapshotSet);
Set<String> manifestLocations = manifestLocations(snapshotSet, table.io());
Set<String> metadataLocations = metadataLocations(tableMetadata);
- Assert.assertEquals("should have 2 manifest lists", 2,
manifestListLocations.size());
- Assert.assertEquals("should have 4 metadata locations", 4,
metadataLocations.size());
+ Assertions.assertThat(manifestListLocations).as("should have 2 manifest
lists").hasSize(2);
+ Assertions.assertThat(metadataLocations).as("should have 4 metadata
locations").hasSize(4);
FileIO fileIO = Mockito.mock(FileIO.class);
Mockito.when(fileIO.newInputFile(Mockito.anyString()))
@@ -141,14 +144,15 @@ public class TestCatalogUtilDropTable extends
HadoopTableTestBase {
.deleteFile(argumentCaptor.capture());
List<String> deletedPaths = argumentCaptor.getAllValues();
- Assert.assertTrue(
- "should contain all created manifest lists",
- deletedPaths.containsAll(manifestListLocations));
- Assert.assertTrue(
- "should contain all created manifests",
deletedPaths.containsAll(manifestLocations));
- Assert.assertTrue(
- "should contain all created metadata locations",
- deletedPaths.containsAll(metadataLocations));
+ Assertions.assertThat(deletedPaths)
+ .as("should contain all created manifest lists")
+ .containsAll(manifestListLocations);
+ Assertions.assertThat(deletedPaths)
+ .as("should contain all created manifests")
+ .containsAll(manifestLocations);
+ Assertions.assertThat(deletedPaths)
+ .as("should contain all created metadata locations")
+ .containsAll(metadataLocations);
}
private Set<String> manifestListLocations(Set<Snapshot> snapshotSet) {
diff --git
a/core/src/test/java/org/apache/iceberg/hadoop/TestHadoopCatalog.java
b/core/src/test/java/org/apache/iceberg/hadoop/TestHadoopCatalog.java
index 00c3058a4c..4f889b24ca 100644
--- a/core/src/test/java/org/apache/iceberg/hadoop/TestHadoopCatalog.java
+++ b/core/src/test/java/org/apache/iceberg/hadoop/TestHadoopCatalog.java
@@ -51,8 +51,7 @@ import
org.apache.iceberg.relocated.com.google.common.collect.Sets;
import org.apache.iceberg.transforms.Transform;
import org.apache.iceberg.transforms.Transforms;
import org.assertj.core.api.Assertions;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
public class TestHadoopCatalog extends HadoopTableTestBase {
private static ImmutableMap<String, String> meta = ImmutableMap.of();
@@ -69,10 +68,11 @@ public class TestHadoopCatalog extends HadoopTableTestBase {
.withProperties(ImmutableMap.of("key2", "value2"))
.create();
- Assert.assertEquals(TABLE_SCHEMA.toString(), table.schema().toString());
- Assert.assertEquals(1, table.spec().fields().size());
- Assert.assertEquals("value1", table.properties().get("key1"));
- Assert.assertEquals("value2", table.properties().get("key2"));
+
Assertions.assertThat(table.schema().toString()).isEqualTo(TABLE_SCHEMA.toString());
+ Assertions.assertThat(table.spec().fields()).hasSize(1);
+ Assertions.assertThat(table.properties())
+ .containsEntry("key1", "value1")
+ .containsEntry("key2", "value2");
}
@Test
@@ -84,8 +84,8 @@ public class TestHadoopCatalog extends HadoopTableTestBase {
txn.commitTransaction();
Table table = catalog.loadTable(tableIdent);
- Assert.assertEquals(TABLE_SCHEMA.toString(), table.schema().toString());
- Assert.assertTrue(table.spec().isUnpartitioned());
+
Assertions.assertThat(table.schema().toString()).isEqualTo(TABLE_SCHEMA.toString());
+ Assertions.assertThat(table.spec().isUnpartitioned()).isTrue();
}
@Test
@@ -105,23 +105,26 @@ public class TestHadoopCatalog extends
HadoopTableTestBase {
createTxn.commitTransaction();
Table table = catalog.loadTable(tableIdent);
- Assert.assertNotNull(table.currentSnapshot());
+ Assertions.assertThat(table.currentSnapshot()).isNotNull();
Transaction replaceTxn =
catalog.buildTable(tableIdent, SCHEMA).withProperty("key2",
"value2").replaceTransaction();
replaceTxn.commitTransaction();
table = catalog.loadTable(tableIdent);
- Assert.assertNull(table.currentSnapshot());
+ Assertions.assertThat(table.currentSnapshot()).isNull();
PartitionSpec v1Expected =
PartitionSpec.builderFor(table.schema())
.alwaysNull("data", "data_bucket")
.withSpecId(1)
.build();
- Assert.assertEquals("Table should have a spec with one void field",
v1Expected, table.spec());
+ Assertions.assertThat(table.spec())
+ .as("Table should have a spec with one void field")
+ .isEqualTo(v1Expected);
- Assert.assertEquals("value1", table.properties().get("key1"));
- Assert.assertEquals("value2", table.properties().get("key2"));
+ Assertions.assertThat(table.properties())
+ .containsEntry("key1", "value1")
+ .containsEntry("key2", "value2");
}
@Test
@@ -155,8 +158,8 @@ public class TestHadoopCatalog extends HadoopTableTestBase {
Table table = hadoopCatalog().createTable(tableIdent, SCHEMA, SPEC);
SortOrder sortOrder = table.sortOrder();
- Assert.assertEquals("Order ID must match", 0, sortOrder.orderId());
- Assert.assertTrue("Order must unsorted", sortOrder.isUnsorted());
+ Assertions.assertThat(sortOrder.orderId()).as("Order ID must
match").isEqualTo(0);
+ Assertions.assertThat(sortOrder.isUnsorted()).as("Order must be
unsorted").isTrue();
}
@Test
@@ -171,13 +174,18 @@ public class TestHadoopCatalog extends
HadoopTableTestBase {
.create();
SortOrder sortOrder = table.sortOrder();
- Assert.assertEquals("Order ID must match", 1, sortOrder.orderId());
- Assert.assertEquals("Order must have 1 field", 1,
sortOrder.fields().size());
- Assert.assertEquals("Direction must match ", ASC,
sortOrder.fields().get(0).direction());
- Assert.assertEquals(
- "Null order must match ", NULLS_FIRST,
sortOrder.fields().get(0).nullOrder());
+ Assertions.assertThat(sortOrder.orderId()).as("Order ID must
match").isEqualTo(1);
+ Assertions.assertThat(sortOrder.fields().size()).as("Order must have 1
field").isEqualTo(1);
+ Assertions.assertThat(sortOrder.fields().get(0).direction())
+ .as("Direction must match")
+ .isEqualTo(ASC);
+ Assertions.assertThat(sortOrder.fields().get(0).nullOrder())
+ .as("Null order must match")
+ .isEqualTo(NULLS_FIRST);
Transform<?, ?> transform = Transforms.identity();
- Assert.assertEquals("Transform must match", transform,
sortOrder.fields().get(0).transform());
+ Assertions.assertThat(sortOrder.fields().get(0).transform())
+ .as("Transform must match")
+ .isEqualTo(transform);
}
@Test
@@ -188,10 +196,10 @@ public class TestHadoopCatalog extends
HadoopTableTestBase {
String metaLocation = catalog.defaultWarehouseLocation(testTable);
FileSystem fs = Util.getFs(new Path(metaLocation), catalog.getConf());
- Assert.assertTrue(fs.isDirectory(new Path(metaLocation)));
+ Assertions.assertThat(fs.isDirectory(new Path(metaLocation))).isTrue();
catalog.dropTable(testTable);
- Assert.assertFalse(fs.isDirectory(new Path(metaLocation)));
+ Assertions.assertThat(fs.isDirectory(new Path(metaLocation))).isFalse();
}
@Test
@@ -201,15 +209,15 @@ public class TestHadoopCatalog extends
HadoopTableTestBase {
TableIdentifier testTable = TableIdentifier.of("tbl");
Table table = catalog.createTable(testTable, SCHEMA,
PartitionSpec.unpartitioned());
- Assert.assertEquals(table.schema().toString(), TABLE_SCHEMA.toString());
- Assert.assertEquals("hadoop.tbl", table.name());
+
Assertions.assertThat(table.schema().toString()).isEqualTo(TABLE_SCHEMA.toString());
+ Assertions.assertThat(table.name()).isEqualTo("hadoop.tbl");
String metaLocation = catalog.defaultWarehouseLocation(testTable);
FileSystem fs = Util.getFs(new Path(metaLocation), catalog.getConf());
- Assert.assertTrue(fs.isDirectory(new Path(metaLocation)));
+ Assertions.assertThat(fs.isDirectory(new Path(metaLocation))).isTrue();
catalog.dropTable(testTable);
- Assert.assertFalse(fs.isDirectory(new Path(metaLocation)));
+ Assertions.assertThat(fs.isDirectory(new Path(metaLocation))).isFalse();
}
@Test
@@ -220,10 +228,10 @@ public class TestHadoopCatalog extends
HadoopTableTestBase {
String metaLocation = catalog.defaultWarehouseLocation(testTable);
FileSystem fs = Util.getFs(new Path(metaLocation), catalog.getConf());
- Assert.assertTrue(fs.isDirectory(new Path(metaLocation)));
+ Assertions.assertThat(fs.isDirectory(new Path(metaLocation))).isTrue();
catalog.dropTable(testTable);
- Assert.assertFalse(fs.isDirectory(new Path(metaLocation)));
+ Assertions.assertThat(fs.isDirectory(new Path(metaLocation))).isFalse();
}
@Test
@@ -232,14 +240,14 @@ public class TestHadoopCatalog extends
HadoopTableTestBase {
TableIdentifier testTable = TableIdentifier.of("db", "ns1", "ns2", "tbl");
String metaLocation = catalog.defaultWarehouseLocation(testTable);
// testing with non existent directory
- Assert.assertFalse(catalog.dropTable(testTable));
+ Assertions.assertThat(catalog.dropTable(testTable)).isFalse();
FileSystem fs = Util.getFs(new Path(metaLocation), catalog.getConf());
fs.mkdirs(new Path(metaLocation));
- Assert.assertTrue(fs.isDirectory(new Path(metaLocation)));
+ Assertions.assertThat(fs.isDirectory(new Path(metaLocation))).isTrue();
- Assert.assertFalse(catalog.dropTable(testTable));
- Assert.assertTrue(fs.isDirectory(new Path(metaLocation)));
+ Assertions.assertThat(catalog.dropTable(testTable)).isFalse();
+ Assertions.assertThat(fs.isDirectory(new Path(metaLocation))).isTrue();
}
@Test
@@ -267,13 +275,11 @@ public class TestHadoopCatalog extends
HadoopTableTestBase {
List<TableIdentifier> tbls1 = catalog.listTables(Namespace.of("db"));
Set<String> tblSet = Sets.newHashSet(tbls1.stream().map(t ->
t.name()).iterator());
- Assert.assertEquals(2, tblSet.size());
- Assert.assertTrue(tblSet.contains("tbl1"));
- Assert.assertTrue(tblSet.contains("tbl2"));
+ Assertions.assertThat(tblSet).hasSize(2).contains("tbl1").contains("tbl2");
List<TableIdentifier> tbls2 = catalog.listTables(Namespace.of("db",
"ns1"));
- Assert.assertEquals("table identifiers", 1, tbls2.size());
- Assert.assertEquals("table name", "tbl3", tbls2.get(0).name());
+ Assertions.assertThat(tbls2).hasSize(1);
+ Assertions.assertThat(tbls2.get(0).name()).isEqualTo("tbl3");
Assertions.assertThatThrownBy(() -> catalog.listTables(Namespace.of("db",
"ns1", "ns2")))
.isInstanceOf(NoSuchNamespaceException.class)
@@ -289,14 +295,15 @@ public class TestHadoopCatalog extends
HadoopTableTestBase {
create.table().locationProvider(); // NPE triggered if not handled
appropriately
create.commitTransaction();
- Assert.assertEquals(
- "1 table expected", 1, catalog.listTables(Namespace.of("ns1",
"ns2")).size());
+ Assertions.assertThat(catalog.listTables(Namespace.of("ns1", "ns2")))
+ .as("1 table expected")
+ .hasSize(1);
catalog.dropTable(tableIdent, true);
}
@Test
public void testCreateNamespace() throws Exception {
- String warehouseLocation = temp.newFolder().getAbsolutePath();
+ String warehouseLocation = tableDir.getAbsolutePath();
HadoopCatalog catalog = new HadoopCatalog();
catalog.setConf(new Configuration());
catalog.initialize(
@@ -309,14 +316,14 @@ public class TestHadoopCatalog extends
HadoopTableTestBase {
String metaLocation1 = warehouseLocation + "/" + "db/ns1/ns2";
FileSystem fs1 = Util.getFs(new Path(metaLocation1), catalog.getConf());
- Assert.assertTrue(fs1.isDirectory(new Path(metaLocation1)));
+ Assertions.assertThat(fs1.isDirectory(new Path(metaLocation1))).isTrue();
String metaLocation2 = warehouseLocation + "/" + "db/ns2/ns3";
FileSystem fs2 = Util.getFs(new Path(metaLocation2), catalog.getConf());
- Assert.assertTrue(fs2.isDirectory(new Path(metaLocation2)));
+ Assertions.assertThat(fs2.isDirectory(new Path(metaLocation2))).isTrue();
Assertions.assertThatThrownBy(() ->
catalog.createNamespace(tbl1.namespace()))
-
.isInstanceOf(org.apache.iceberg.exceptions.AlreadyExistsException.class)
+ .isInstanceOf(AlreadyExistsException.class)
.hasMessage("Namespace already exists: " + tbl1.namespace());
}
@@ -335,26 +342,23 @@ public class TestHadoopCatalog extends
HadoopTableTestBase {
List<Namespace> nsp1 = catalog.listNamespaces(Namespace.of("db"));
Set<String> tblSet = Sets.newHashSet(nsp1.stream().map(t ->
t.toString()).iterator());
- Assert.assertEquals(3, tblSet.size());
- Assert.assertTrue(tblSet.contains("db.ns1"));
- Assert.assertTrue(tblSet.contains("db.ns2"));
- Assert.assertTrue(tblSet.contains("db.ns3"));
+ Assertions.assertThat(tblSet)
+ .hasSize(3)
+ .contains("db.ns1")
+ .contains("db.ns2")
+ .contains("db.ns3");
List<Namespace> nsp2 = catalog.listNamespaces(Namespace.of("db", "ns1"));
- Assert.assertEquals(1, nsp2.size());
- Assert.assertTrue(nsp2.get(0).toString().equals("db.ns1.ns2"));
+ Assertions.assertThat(nsp2).hasSize(1);
+ Assertions.assertThat(nsp2.get(0).toString()).isEqualTo("db.ns1.ns2");
List<Namespace> nsp3 = catalog.listNamespaces();
Set<String> tblSet2 = Sets.newHashSet(nsp3.stream().map(t ->
t.toString()).iterator());
- Assert.assertEquals(2, tblSet2.size());
- Assert.assertTrue(tblSet2.contains("db"));
- Assert.assertTrue(tblSet2.contains("db2"));
+ Assertions.assertThat(tblSet2).hasSize(2).contains("db").contains("db2");
List<Namespace> nsp4 = catalog.listNamespaces();
Set<String> tblSet3 = Sets.newHashSet(nsp4.stream().map(t ->
t.toString()).iterator());
- Assert.assertEquals(2, tblSet3.size());
- Assert.assertTrue(tblSet3.contains("db"));
- Assert.assertTrue(tblSet3.contains("db2"));
+ Assertions.assertThat(tblSet3).hasSize(2).contains("db").contains("db2");
Assertions.assertThatThrownBy(() ->
catalog.listNamespaces(Namespace.of("db", "db2", "ns2")))
.isInstanceOf(NoSuchNamespaceException.class)
@@ -391,12 +395,12 @@ public class TestHadoopCatalog extends
HadoopTableTestBase {
Lists.newArrayList(tbl1, tbl2, tbl3, tbl4)
.forEach(t -> catalog.createTable(t, SCHEMA,
PartitionSpec.unpartitioned()));
- Assert.assertTrue(
- "Should true to namespace exist",
- catalog.namespaceExists(Namespace.of("db", "ns1", "ns2")));
- Assert.assertTrue(
- "Should false to namespace doesn't exist",
- !catalog.namespaceExists(Namespace.of("db", "db2", "ns2")));
+ Assertions.assertThat(catalog.namespaceExists(Namespace.of("db", "ns1",
"ns2")))
+ .as("Should be true as namespace exists")
+ .isTrue();
+ Assertions.assertThat(catalog.namespaceExists(Namespace.of("db", "db2",
"ns2")))
+ .as("Should be false as namespace doesn't exist")
+ .isFalse();
}
@Test
@@ -412,7 +416,7 @@ public class TestHadoopCatalog extends HadoopTableTestBase {
@Test
public void testDropNamespace() throws IOException {
- String warehouseLocation = temp.newFolder().getAbsolutePath();
+ String warehouseLocation = tableDir.getAbsolutePath();
HadoopCatalog catalog = new HadoopCatalog();
catalog.setConf(new Configuration());
catalog.initialize(
@@ -429,15 +433,16 @@ public class TestHadoopCatalog extends
HadoopTableTestBase {
Assertions.assertThatThrownBy(() ->
catalog.dropNamespace(Namespace.of("db")))
.isInstanceOf(NamespaceNotEmptyException.class)
.hasMessage("Namespace " + namespace1 + " is not empty.");
- Assert.assertFalse(
- "Should fail to drop namespace doesn't exist",
catalog.dropNamespace(Namespace.of("db2")));
- Assert.assertTrue(catalog.dropTable(tbl1));
- Assert.assertTrue(catalog.dropTable(tbl2));
- Assert.assertTrue(catalog.dropNamespace(namespace2));
- Assert.assertTrue(catalog.dropNamespace(namespace1));
+ Assertions.assertThat(catalog.dropNamespace(Namespace.of("db2")))
+ .as("Should fail to drop namespace that doesn't exist")
+ .isFalse();
+ Assertions.assertThat(catalog.dropTable(tbl1)).isTrue();
+ Assertions.assertThat(catalog.dropTable(tbl2)).isTrue();
+ Assertions.assertThat(catalog.dropNamespace(namespace2)).isTrue();
+ Assertions.assertThat(catalog.dropNamespace(namespace1)).isTrue();
String metaLocation = warehouseLocation + "/" + "db";
FileSystem fs = Util.getFs(new Path(metaLocation), catalog.getConf());
- Assert.assertFalse(fs.isDirectory(new Path(metaLocation)));
+ Assertions.assertThat(fs.isDirectory(new Path(metaLocation))).isFalse();
}
@Test
@@ -457,9 +462,9 @@ public class TestHadoopCatalog extends HadoopTableTestBase {
}
// Check the result of the findVersion(), and load the table and check the
current snapshotId
- Assert.assertEquals(1, tableOperations.findVersion());
- Assert.assertEquals(
- secondSnapshotId,
TABLES.load(tableLocation).currentSnapshot().snapshotId());
+ Assertions.assertThat(tableOperations.findVersion()).isEqualTo(1);
+
Assertions.assertThat(TABLES.load(tableLocation).currentSnapshot().snapshotId())
+ .isEqualTo(secondSnapshotId);
// Write newer data to confirm that we are writing the correct file
io.deleteFile(versionHintFile.getPath());
@@ -468,26 +473,26 @@ public class TestHadoopCatalog extends
HadoopTableTestBase {
}
// Check the result of the findVersion(), and load the table and check the
current snapshotId
- Assert.assertEquals(3, tableOperations.findVersion());
- Assert.assertEquals(
- secondSnapshotId,
TABLES.load(tableLocation).currentSnapshot().snapshotId());
+ Assertions.assertThat(tableOperations.findVersion()).isEqualTo(3);
+
Assertions.assertThat(TABLES.load(tableLocation).currentSnapshot().snapshotId())
+ .isEqualTo(secondSnapshotId);
// Write an empty version hint file
io.deleteFile(versionHintFile.getPath());
io.newOutputFile(versionHintFile.getPath()).create().close();
// Check the result of the findVersion(), and load the table and check the
current snapshotId
- Assert.assertEquals(3, tableOperations.findVersion());
- Assert.assertEquals(
- secondSnapshotId,
TABLES.load(tableLocation).currentSnapshot().snapshotId());
+ Assertions.assertThat(tableOperations.findVersion()).isEqualTo(3);
+
Assertions.assertThat(TABLES.load(tableLocation).currentSnapshot().snapshotId())
+ .isEqualTo(secondSnapshotId);
// Just delete the file
io.deleteFile(versionHintFile.getPath());
// Check the result of the versionHint(), and load the table and check the
current snapshotId
- Assert.assertEquals(3, tableOperations.findVersion());
- Assert.assertEquals(
- secondSnapshotId,
TABLES.load(tableLocation).currentSnapshot().snapshotId());
+ Assertions.assertThat(tableOperations.findVersion()).isEqualTo(3);
+
Assertions.assertThat(TABLES.load(tableLocation).currentSnapshot().snapshotId())
+ .isEqualTo(secondSnapshotId);
}
@Test
@@ -507,9 +512,9 @@ public class TestHadoopCatalog extends HadoopTableTestBase {
io.deleteFile(tableOperations.getMetadataFile(1).toString());
// Check the result of the findVersion(), and load the table and check the
current snapshotId
- Assert.assertEquals(3, tableOperations.findVersion());
- Assert.assertEquals(
- secondSnapshotId,
TABLES.load(tableLocation).currentSnapshot().snapshotId());
+ Assertions.assertThat(tableOperations.findVersion()).isEqualTo(3);
+
Assertions.assertThat(TABLES.load(tableLocation).currentSnapshot().snapshotId())
+ .isEqualTo(secondSnapshotId);
// Remove all the version files, and see if we can recover. Hint... not :)
io.deleteFile(tableOperations.getMetadataFile(2).toString());
@@ -517,7 +522,7 @@ public class TestHadoopCatalog extends HadoopTableTestBase {
// Check that we got 0 findVersion, and a NoSuchTableException is thrown
when trying to load the
// table
- Assert.assertEquals(0, tableOperations.findVersion());
+ Assertions.assertThat(tableOperations.findVersion()).isEqualTo(0);
Assertions.assertThatThrownBy(() -> TABLES.load(tableLocation))
.isInstanceOf(NoSuchTableException.class)
.hasMessageStartingWith("Table does not exist");
@@ -530,13 +535,12 @@ public class TestHadoopCatalog extends
HadoopTableTestBase {
catalog.buildTable(tableIdent, SCHEMA).withPartitionSpec(SPEC).create();
Table table = catalog.loadTable(tableIdent);
- Assert.assertEquals("Name must match", "hadoop.db.ns1.ns2.tbl",
table.name());
+ Assertions.assertThat(table.name()).isEqualTo("hadoop.db.ns1.ns2.tbl");
TableIdentifier snapshotsTableIdent =
TableIdentifier.of("db", "ns1", "ns2", "tbl", "snapshots");
Table snapshotsTable = catalog.loadTable(snapshotsTableIdent);
- Assert.assertEquals(
- "Name must match", "hadoop.db.ns1.ns2.tbl.snapshots",
snapshotsTable.name());
+
Assertions.assertThat(snapshotsTable.name()).isEqualTo("hadoop.db.ns1.ns2.tbl.snapshots");
}
private static void addVersionsToTable(Table table) {
@@ -579,28 +583,25 @@ public class TestHadoopCatalog extends
HadoopTableTestBase {
.withProperty("key5", "table-key5")
.create();
- Assert.assertEquals(
- "Table defaults set for the catalog must be added to the table
properties.",
- "catalog-default-key1",
- table.properties().get("key1"));
- Assert.assertEquals(
- "Table property must override table default properties set at catalog
level.",
- "table-key2",
- table.properties().get("key2"));
- Assert.assertEquals(
- "Table property override set at catalog level must override table
default"
- + " properties set at catalog level and table property specified.",
- "catalog-override-key3",
- table.properties().get("key3"));
- Assert.assertEquals(
- "Table override not in table props or defaults should be added to
table properties",
- "catalog-override-key4",
- table.properties().get("key4"));
- Assert.assertEquals(
- "Table properties without any catalog level default or override should
be added to table"
- + " properties.",
- "table-key5",
- table.properties().get("key5"));
+ Assertions.assertThat(table.properties().get("key1"))
+ .as("Table defaults set for the catalog must be added to the table
properties.")
+ .isEqualTo("catalog-default-key1");
+ Assertions.assertThat(table.properties().get("key2"))
+ .as("Table property must override table default properties set at
catalog level.")
+ .isEqualTo("table-key2");
+ Assertions.assertThat(table.properties().get("key3"))
+ .as(
+ "Table property override set at catalog level must override table
default"
+ + " properties set at catalog level and table property
specified.")
+ .isEqualTo("catalog-override-key3");
+ Assertions.assertThat(table.properties().get("key4"))
+ .as("Table override not in table props or defaults should be added to
table properties")
+ .isEqualTo("catalog-override-key4");
+ Assertions.assertThat(table.properties().get("key5"))
+ .as(
+ "Table properties without any catalog level default or override
should be added to table"
+ + " properties.")
+ .isEqualTo("table-key5");
}
@Test
diff --git
a/core/src/test/java/org/apache/iceberg/hadoop/TestHadoopCommits.java
b/core/src/test/java/org/apache/iceberg/hadoop/TestHadoopCommits.java
index 31f8200ca5..b9c23f33a5 100644
--- a/core/src/test/java/org/apache/iceberg/hadoop/TestHadoopCommits.java
+++ b/core/src/test/java/org/apache/iceberg/hadoop/TestHadoopCommits.java
@@ -21,9 +21,6 @@ package org.apache.iceberg.hadoop;
import static org.apache.iceberg.TableProperties.COMMIT_NUM_RETRIES;
import static org.apache.iceberg.types.Types.NestedField.optional;
import static org.apache.iceberg.types.Types.NestedField.required;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
@@ -57,8 +54,8 @@ import
org.apache.iceberg.relocated.com.google.common.collect.Sets;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.util.Tasks;
import org.assertj.core.api.Assertions;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import org.mockito.Mockito;
public class TestHadoopCommits extends HadoopTableTestBase {
@@ -67,55 +64,53 @@ public class TestHadoopCommits extends HadoopTableTestBase {
public void testCreateTable() throws Exception {
PartitionSpec expectedSpec =
PartitionSpec.builderFor(TABLE_SCHEMA).bucket("data", 16).build();
- Assert.assertEquals(
- "Table schema should match schema with reassigned ids",
- TABLE_SCHEMA.asStruct(),
- table.schema().asStruct());
- Assert.assertEquals(
- "Table partition spec should match with reassigned ids", expectedSpec,
table.spec());
+ Assertions.assertThat(table.schema().asStruct())
+ .as("Table schema should match schema with reassigned ids")
+ .isEqualTo(TABLE_SCHEMA.asStruct());
+ Assertions.assertThat(table.spec())
+ .as("Table partition spec should match with reassigned ids")
+ .isEqualTo(expectedSpec);
List<FileScanTask> tasks = Lists.newArrayList(table.newScan().planFiles());
- Assert.assertEquals("Should not create any scan tasks", 0, tasks.size());
-
- Assert.assertTrue("Table location should exist", tableDir.exists());
- Assert.assertTrue(
- "Should create metadata folder", metadataDir.exists() &&
metadataDir.isDirectory());
- Assert.assertTrue("Should create v1 metadata", version(1).exists() &&
version(1).isFile());
- Assert.assertFalse("Should not create v2 or newer versions",
version(2).exists());
- Assert.assertTrue("Should create version hint file",
versionHintFile.exists());
- Assert.assertEquals("Should write the current version to the hint file",
1, readVersionHint());
-
+ Assertions.assertThat(tasks).as("Should not create any scan
tasks").isEmpty();
+ Assertions.assertThat(tableDir).as("Table location should exist").exists();
+ Assertions.assertThat(metadataDir).as("Should create metadata
folder").exists().isDirectory();
+ Assertions.assertThat(version(1)).as("Should create v1
metadata").exists().isFile();
+ Assertions.assertThat(version(2)).as("Should not create v2 or newer
versions").doesNotExist();
+ Assertions.assertThat(versionHintFile).as("Should create version hint
file").exists();
+ Assertions.assertThat(readVersionHint())
+ .as("Should write the current version to the hint file")
+ .isEqualTo(1);
List<File> manifests = listManifestFiles();
- Assert.assertEquals("Should contain 0 Avro manifest files", 0,
manifests.size());
+ Assertions.assertThat(manifests).as("Should contain 0 Avro manifest
files").isEmpty();
}
@Test
public void testSchemaUpdate() throws Exception {
- Assert.assertTrue("Should create v1 metadata", version(1).exists() &&
version(1).isFile());
- Assert.assertFalse("Should not create v2 or newer versions",
version(2).exists());
+ Assertions.assertThat(version(1)).as("Should create v1
metadata").exists().isFile();
+ Assertions.assertThat(version(2)).as("Should not create v2 or newer
versions").doesNotExist();
table.updateSchema().addColumn("n", Types.IntegerType.get()).commit();
- Assert.assertTrue(
- "Should create v2 for the update", version(2).exists() &&
version(2).isFile());
- Assert.assertEquals("Should write the current version to the hint file",
2, readVersionHint());
-
- Assert.assertEquals(
- "Table schema should match schema with reassigned ids",
- UPDATED_SCHEMA.asStruct(),
- table.schema().asStruct());
+ Assertions.assertThat(version(2)).as("Should create v2 for the
update").exists().isFile();
+ Assertions.assertThat(readVersionHint())
+ .as("Should write the current version to the hint file")
+ .isEqualTo(2);
+ Assertions.assertThat(table.schema().asStruct())
+ .as("Table schema should match schema with reassigned ids")
+ .isEqualTo(UPDATED_SCHEMA.asStruct());
List<FileScanTask> tasks = Lists.newArrayList(table.newScan().planFiles());
- Assert.assertEquals("Should not create any scan tasks", 0, tasks.size());
+ Assertions.assertThat(tasks).as("Should not create any scan
tasks").isEmpty();
List<File> manifests = listManifestFiles();
- Assert.assertEquals("Should contain 0 Avro manifest files", 0,
manifests.size());
+ Assertions.assertThat(manifests).as("Should contain 0 Avro manifest
files").isEmpty();
}
@Test
public void testSchemaUpdateComplexType() throws Exception {
- Assert.assertTrue("Should create v1 metadata", version(1).exists() &&
version(1).isFile());
- Assert.assertFalse("Should not create v2 or newer versions",
version(2).exists());
+ Assertions.assertThat(version(1)).as("Should create v1
metadata").exists().isFile();
+ Assertions.assertThat(version(2)).as("Should not create v2 or newer
versions").doesNotExist();
Types.StructType complexColumn =
Types.StructType.of(
@@ -145,25 +140,25 @@ public class TestHadoopCommits extends
HadoopTableTestBase {
table.updateSchema().addColumn("complex", complexColumn).commit();
- Assert.assertTrue(
- "Should create v2 for the update", version(2).exists() &&
version(2).isFile());
- Assert.assertEquals("Should write the current version to the hint file",
2, readVersionHint());
- Assert.assertEquals(
- "Table schema should match schema with reassigned ids",
- updatedSchema.asStruct(),
- table.schema().asStruct());
+ Assertions.assertThat(version(2)).as("Should create v2 for the
update").exists().isFile();
+ Assertions.assertThat(readVersionHint())
+ .as("Should write the current version to the hint file")
+ .isEqualTo(2);
+ Assertions.assertThat(table.schema().asStruct())
+ .as("Table schema should match schema with reassigned ids")
+ .isEqualTo(updatedSchema.asStruct());
List<FileScanTask> tasks = Lists.newArrayList(table.newScan().planFiles());
- Assert.assertEquals("Should not create any scan tasks", 0, tasks.size());
+ Assertions.assertThat(tasks).as("Should not create any scan
tasks").isEmpty();
List<File> manifests = listManifestFiles();
- Assert.assertEquals("Should contain 0 Avro manifest files", 0,
manifests.size());
+ Assertions.assertThat(manifests).as("Should contain 0 Avro manifest
files").isEmpty();
}
@Test
public void testSchemaUpdateIdentifierFields() throws Exception {
- Assert.assertTrue("Should create v1 metadata", version(1).exists() &&
version(1).isFile());
- Assert.assertFalse("Should not create v2 or newer versions",
version(2).exists());
+ Assertions.assertThat(version(1)).as("Should create v1
metadata").exists().isFile();
+ Assertions.assertThat(version(2)).as("Should not create v2 or newer
versions").doesNotExist();
Schema updatedSchema =
new Schema(
@@ -174,17 +169,16 @@ public class TestHadoopCommits extends
HadoopTableTestBase {
table.updateSchema().setIdentifierFields("id").commit();
- Assert.assertTrue(
- "Should create v2 for the update", version(2).exists() &&
version(2).isFile());
- Assert.assertEquals("Should write the current version to the hint file",
2, readVersionHint());
- Assert.assertEquals(
- "Table schema should match schema with reassigned ids",
- updatedSchema.asStruct(),
- table.schema().asStruct());
- Assert.assertEquals(
- "Identifier fields should match schema with reassigned ids",
- updatedSchema.identifierFieldIds(),
- table.schema().identifierFieldIds());
+ Assertions.assertThat(version(2)).as("Should create v2 for the
update").exists().isFile();
+ Assertions.assertThat(readVersionHint())
+ .as("Should write the current version to the hint file")
+ .isEqualTo(2);
+ Assertions.assertThat(table.schema().asStruct())
+ .as("Table schema should match schema with reassigned ids")
+ .isEqualTo(updatedSchema.asStruct());
+ Assertions.assertThat(table.schema().identifierFieldIds())
+ .as("Identifier fields should match schema with reassigned ids")
+ .isEqualTo(updatedSchema.identifierFieldIds());
}
@Test
@@ -193,8 +187,8 @@ public class TestHadoopCommits extends HadoopTableTestBase {
UpdateSchema update = table.updateSchema().addColumn("n",
Types.IntegerType.get());
update.apply();
- Assert.assertTrue("Should create v1 metadata", version(1).exists() &&
version(1).isFile());
- Assert.assertFalse("Should not create v2 or newer versions",
version(2).exists());
+ Assertions.assertThat(version(1)).as("Should create v1
metadata").exists().isFile();
+ Assertions.assertThat(version(2)).as("Should not create v2 or newer
versions").doesNotExist();
version(2).createNewFile();
@@ -203,15 +197,15 @@ public class TestHadoopCommits extends
HadoopTableTestBase {
.hasMessageStartingWith("Version 2 already exists");
List<File> manifests = listManifestFiles();
- Assert.assertEquals("Should contain 0 Avro manifest files", 0,
manifests.size());
+ Assertions.assertThat(manifests).as("Should contain 0 Avro manifest
files").isEmpty();
}
@Test
public void testStaleMetadata() throws Exception {
Table tableCopy = TABLES.load(tableLocation);
- Assert.assertTrue("Should create v1 metadata", version(1).exists() &&
version(1).isFile());
- Assert.assertFalse("Should not create v2 or newer versions",
version(2).exists());
+ Assertions.assertThat(version(1)).as("Should create v1
metadata").exists().isFile();
+ Assertions.assertThat(version(2)).as("Should not create v2 or newer
versions").doesNotExist();
// prepare changes on the copy without committing
UpdateSchema updateCopy = tableCopy.updateSchema().addColumn("m",
Types.IntegerType.get());
@@ -219,59 +213,55 @@ public class TestHadoopCommits extends
HadoopTableTestBase {
table.updateSchema().addColumn("n", Types.IntegerType.get()).commit();
- Assert.assertTrue(
- "Should create v2 for the update", version(2).exists() &&
version(2).isFile());
- Assert.assertNotEquals(
- "Unmodified copy should be out of date after update",
- table.schema().asStruct(),
- tableCopy.schema().asStruct());
+ Assertions.assertThat(version(2)).as("Should create v2 for the
update").exists().isFile();
+ Assertions.assertThat(table.schema().asStruct())
+ .as("Unmodified copy should be out of date after update")
+ .isNotEqualTo(tableCopy.schema().asStruct());
// update the table
tableCopy.refresh();
- Assert.assertEquals(
- "Copy should be back in sync", table.schema().asStruct(),
tableCopy.schema().asStruct());
+ Assertions.assertThat(table.schema().asStruct())
+ .as("Copy should be back in sync")
+ .isEqualTo(tableCopy.schema().asStruct());
Assertions.assertThatThrownBy(updateCopy::commit)
.isInstanceOf(CommitFailedException.class)
.hasMessage("Cannot commit changes based on stale table metadata");
List<File> manifests = listManifestFiles();
- Assert.assertEquals("Should contain 0 Avro manifest files", 0,
manifests.size());
+ Assertions.assertThat(manifests).as("Should contain 0 Avro manifest
files").isEmpty();
}
@Test
public void testStaleVersionHint() throws Exception {
Table stale = TABLES.load(tableLocation);
- Assert.assertTrue("Should create v1 metadata", version(1).exists() &&
version(1).isFile());
- Assert.assertFalse("Should not create v2 or newer versions",
version(2).exists());
+ Assertions.assertThat(version(1)).as("Should create v1
metadata").exists().isFile();
+ Assertions.assertThat(version(2)).as("Should not create v2 or newer
versions").doesNotExist();
table.updateSchema().addColumn("n", Types.IntegerType.get()).commit();
- Assert.assertTrue(
- "Should create v2 for the update", version(2).exists() &&
version(2).isFile());
- Assert.assertEquals("Should write the current version to the hint file",
2, readVersionHint());
-
- Assert.assertNotEquals(
- "Stable table schema should not match",
- UPDATED_SCHEMA.asStruct(),
- stale.schema().asStruct());
+ Assertions.assertThat(version(2)).as("Should create v2 for the
update").exists().isFile();
+ Assertions.assertThat(readVersionHint())
+ .as("Should write the current version to the hint file")
+ .isEqualTo(2);
+ Assertions.assertThat(stale.schema().asStruct())
+ .as("Stale table schema should not match")
+ .isNotEqualTo(UPDATED_SCHEMA.asStruct());
// roll the version hint back to 1
replaceVersionHint(1);
Table reloaded = TABLES.load(tableLocation);
- Assert.assertEquals(
- "Updated schema for newly loaded table should match",
- UPDATED_SCHEMA.asStruct(),
- reloaded.schema().asStruct());
+ Assertions.assertThat(reloaded.schema().asStruct())
+ .as("Updated schema for newly loaded table should match")
+ .isEqualTo(UPDATED_SCHEMA.asStruct());
stale.refresh();
- Assert.assertEquals(
- "Refreshed schema for stale table should match",
- UPDATED_SCHEMA.asStruct(),
- reloaded.schema().asStruct());
+ Assertions.assertThat(reloaded.schema().asStruct())
+ .as("Refreshed schema for stale table should match")
+ .isEqualTo(UPDATED_SCHEMA.asStruct());
}
@Test
@@ -279,33 +269,35 @@ public class TestHadoopCommits extends
HadoopTableTestBase {
// first append
table.newFastAppend().appendFile(FILE_A).commit();
- Assert.assertTrue(
- "Should create v2 for the update", version(2).exists() &&
version(2).isFile());
- Assert.assertEquals("Should write the current version to the hint file",
2, readVersionHint());
+ Assertions.assertThat(version(2)).as("Should create v2 for the
update").exists().isFile();
+ Assertions.assertThat(readVersionHint())
+ .as("Should write the current version to the hint file")
+ .isEqualTo(2);
List<FileScanTask> tasks = Lists.newArrayList(table.newScan().planFiles());
- Assert.assertEquals("Should scan 1 file", 1, tasks.size());
+ Assertions.assertThat(tasks).as("Should scan 1 file").hasSize(1);
List<File> manifests = listManifestFiles();
- Assert.assertEquals("Should contain only one Avro manifest file", 1,
manifests.size());
+ Assertions.assertThat(manifests).as("Should contain only one Avro manifest
file").hasSize(1);
// second append
table.newFastAppend().appendFile(FILE_B).commit();
- Assert.assertTrue(
- "Should create v3 for the update", version(3).exists() &&
version(3).isFile());
- Assert.assertEquals("Should write the current version to the hint file",
3, readVersionHint());
+ Assertions.assertThat(version(3)).as("Should create v3 for the
update").exists().isFile();
+ Assertions.assertThat(readVersionHint())
+ .as("Should write the current version to the hint file")
+ .isEqualTo(3);
tasks = Lists.newArrayList(table.newScan().planFiles());
- Assert.assertEquals("Should scan 2 files", 2, tasks.size());
-
- Assert.assertEquals("Should contain 2 Avro manifest files", 2,
listManifestFiles().size());
+ Assertions.assertThat(tasks).as("Should scan 2 files").hasSize(2);
+ Assertions.assertThat(listManifestFiles())
+ .as("Should contain 2 Avro manifest files")
+ .hasSize(2);
TableMetadata metadata = readMetadataVersion(3);
- Assert.assertEquals(
- "Current snapshot should contain 2 manifests",
- 2,
- metadata.currentSnapshot().allManifests(table.io()).size());
+ Assertions.assertThat(metadata.currentSnapshot().allManifests(table.io()))
+ .as("Current snapshot should contain 2 manifests")
+ .hasSize(2);
}
@Test
@@ -319,15 +311,16 @@ public class TestHadoopCommits extends
HadoopTableTestBase {
table.newAppend().appendFile(FILE_C).commit();
List<FileScanTask> tasks = Lists.newArrayList(table.newScan().planFiles());
- Assert.assertEquals("Should scan 3 files", 3, tasks.size());
+ Assertions.assertThat(tasks).as("Should scan 3 files").hasSize(3);
- Assert.assertEquals("Should contain 3 Avro manifest files", 3,
listManifestFiles().size());
+ Assertions.assertThat(listManifestFiles())
+ .as("Should contain 3 Avro manifest files")
+ .hasSize(3);
TableMetadata metadata = readMetadataVersion(5);
- Assert.assertEquals(
- "Current snapshot should contain 1 merged manifest",
- 1,
- metadata.currentSnapshot().allManifests(table.io()).size());
+ Assertions.assertThat(metadata.currentSnapshot().allManifests(table.io()))
+ .as("Current snapshot should contain 1 merged manifest")
+ .hasSize(1);
}
@Test
@@ -351,9 +344,9 @@ public class TestHadoopCommits extends HadoopTableTestBase {
* provided {@link FileSystem} object. The provided FileSystem will be
injected for commit call.
*/
private void testRenameWithFileSystem(FileSystem mockFs) throws Exception {
- assertTrue("Should create v1 metadata", version(1).exists() &&
version(1).isFile());
- assertFalse("Should not create v2 or newer versions", version(2).exists());
- assertTrue(table instanceof BaseTable);
+ Assertions.assertThat(version(1)).as("Should create v1
metadata").exists().isFile();
+ Assertions.assertThat(version(2)).as("Should not create v2 or newer
versions").doesNotExist();
+ Assertions.assertThat(table).isInstanceOf(BaseTable.class);
BaseTable baseTable = (BaseTable) table;
// use v1 metafile as the test rename destination.
TableMetadata meta1 = baseTable.operations().current();
@@ -362,12 +355,14 @@ public class TestHadoopCommits extends
HadoopTableTestBase {
// (so that we have 2 valid and different metadata files, which will reach
the rename part
// during commit)
table.updateSchema().addColumn("n", Types.IntegerType.get()).commit();
- assertTrue("Should create v2 for the update", version(2).exists() &&
version(2).isFile());
- assertEquals("Should write the current version to the hint file", 2,
readVersionHint());
+ Assertions.assertThat(version(2)).as("Should create v2 for the
update").exists().isFile();
+ Assertions.assertThat(readVersionHint())
+ .as("Should write the current version to the hint file")
+ .isEqualTo(2);
// mock / spy the classes for testing
TableOperations tops = baseTable.operations();
- assertTrue(tops instanceof HadoopTableOperations);
+ Assertions.assertThat(tops).isInstanceOf(HadoopTableOperations.class);
HadoopTableOperations spyOps = Mockito.spy((HadoopTableOperations) tops);
// inject the mockFS into the TableOperations
@@ -379,12 +374,14 @@ public class TestHadoopCommits extends
HadoopTableTestBase {
Set<String> actual =
listMetadataJsonFiles().stream().map(File::getName).collect(Collectors.toSet());
Set<String> expected = Sets.newHashSet("v1.metadata.json",
"v2.metadata.json");
- assertEquals("only v1 and v2 metadata.json should exist.", expected,
actual);
+ Assertions.assertThat(actual)
+ .as("only v1 and v2 metadata.json should exist.")
+ .isEqualTo(expected);
}
@Test
public void testCanReadOldCompressedManifestFiles() throws Exception {
- assertTrue("Should create v1 metadata", version(1).exists() &&
version(1).isFile());
+ Assertions.assertThat(version(1)).as("Should create v1
metadata").exists().isFile();
// do a file append
table.newAppend().appendFile(FILE_A).commit();
@@ -396,22 +393,20 @@ public class TestHadoopCommits extends
HadoopTableTestBase {
List<File> metadataFiles = listMetadataJsonFiles();
- assertEquals("Should have two versions", 2, metadataFiles.size());
- assertTrue(
- "Metadata should be compressed with old format.",
- metadataFiles.stream().allMatch(f ->
f.getName().endsWith(".metadata.json.gz")));
+ Assertions.assertThat(metadataFiles).as("Should have two
versions").hasSize(2);
+ Assertions.assertThat(metadataFiles.stream().map(File::getName))
+ .as("Metadata should be compressed with old format.")
+ .allMatch(f -> f.endsWith(".metadata.json.gz"));
Table reloaded = TABLES.load(tableLocation);
List<FileScanTask> tasks =
Lists.newArrayList(reloaded.newScan().planFiles());
- Assert.assertEquals("Should scan 1 files", 1, tasks.size());
+ Assertions.assertThat(tasks).as("Should scan 1 file").hasSize(1);
}
@Test
- public void testConcurrentFastAppends() throws Exception {
- assertTrue("Should create v1 metadata", version(1).exists() &&
version(1).isFile());
- File dir = temp.newFolder();
- dir.delete();
+ public void testConcurrentFastAppends(@TempDir File dir) throws Exception {
+ Assertions.assertThat(version(1)).as("Should create v1
metadata").exists().isFile();
int threadsCount = 5;
int numberOfCommitedFilesPerThread = 10;
Table tableWithHighRetries =
@@ -453,8 +448,7 @@ public class TestHadoopCommits extends HadoopTableTestBase {
});
tableWithHighRetries.refresh();
- assertEquals(
- threadsCount * numberOfCommitedFilesPerThread,
- Lists.newArrayList(tableWithHighRetries.snapshots()).size());
+ Assertions.assertThat(Lists.newArrayList(tableWithHighRetries.snapshots()))
+ .hasSize(threadsCount * numberOfCommitedFilesPerThread);
}
}
diff --git a/core/src/test/java/org/apache/iceberg/hadoop/TestHadoopTables.java
b/core/src/test/java/org/apache/iceberg/hadoop/TestHadoopTables.java
index fa80712af3..e3d32442e2 100644
--- a/core/src/test/java/org/apache/iceberg/hadoop/TestHadoopTables.java
+++ b/core/src/test/java/org/apache/iceberg/hadoop/TestHadoopTables.java
@@ -41,11 +41,8 @@ import org.apache.iceberg.transforms.Transform;
import org.apache.iceberg.transforms.Transforms;
import org.apache.iceberg.types.Types;
import org.assertj.core.api.Assertions;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
public class TestHadoopTables {
@@ -55,20 +52,15 @@ public class TestHadoopTables {
required(1, "id", Types.IntegerType.get(), "unique ID"),
required(2, "data", Types.StringType.get()));
- @Rule public TemporaryFolder temp = new TemporaryFolder();
- private File tableDir = null;
-
- @Before
- public void setupTableLocation() throws Exception {
- tableDir = temp.newFolder();
- }
+ @TempDir private File tableDir;
+ @TempDir private File dataDir;
@Test
public void testTableExists() {
- Assert.assertFalse(TABLES.exists(tableDir.toURI().toString()));
+
Assertions.assertThat(TABLES.exists(tableDir.toURI().toString())).isFalse();
PartitionSpec spec = PartitionSpec.builderFor(SCHEMA).bucket("data",
16).build();
TABLES.create(SCHEMA, spec, tableDir.toURI().toString());
- Assert.assertTrue(TABLES.exists(tableDir.toURI().toString()));
+ Assertions.assertThat(TABLES.exists(tableDir.toURI().toString())).isTrue();
}
@Test
@@ -83,7 +75,6 @@ public class TestHadoopTables {
@Test
public void testDropTableWithPurge() throws IOException {
- File dataDir = temp.newFolder();
createDummyTable(tableDir, dataDir);
@@ -92,16 +83,13 @@ public class TestHadoopTables {
.isInstanceOf(NoSuchTableException.class)
.hasMessageStartingWith("Table does not exist");
- Assert.assertEquals(0, dataDir.listFiles().length);
- Assert.assertFalse(tableDir.exists());
-
- Assert.assertFalse(TABLES.dropTable(tableDir.toURI().toString()));
+ Assertions.assertThat(dataDir.listFiles()).hasSize(0);
+ Assertions.assertThat(tableDir).doesNotExist();
+
Assertions.assertThat(TABLES.dropTable(tableDir.toURI().toString())).isFalse();
}
@Test
public void testDropTableWithoutPurge() throws IOException {
- File dataDir = temp.newFolder();
-
createDummyTable(tableDir, dataDir);
TABLES.dropTable(tableDir.toURI().toString(), false);
@@ -109,10 +97,9 @@ public class TestHadoopTables {
.isInstanceOf(NoSuchTableException.class)
.hasMessageStartingWith("Table does not exist");
- Assert.assertEquals(1, dataDir.listFiles().length);
- Assert.assertFalse(tableDir.exists());
-
- Assert.assertFalse(TABLES.dropTable(tableDir.toURI().toString()));
+ Assertions.assertThat(dataDir.listFiles()).hasSize(1);
+ Assertions.assertThat(tableDir).doesNotExist();
+    Assertions.assertThat(TABLES.dropTable(tableDir.toURI().toString())).isFalse();
}
@Test
@@ -121,8 +108,8 @@ public class TestHadoopTables {
Table table = TABLES.create(SCHEMA, spec, tableDir.toURI().toString());
SortOrder sortOrder = table.sortOrder();
- Assert.assertEquals("Order ID must match", 0, sortOrder.orderId());
- Assert.assertTrue("Order must unsorted", sortOrder.isUnsorted());
+    Assertions.assertThat(sortOrder.orderId()).as("Order ID must match").isEqualTo(0);
+    Assertions.assertThat(sortOrder.isUnsorted()).as("Order must be unsorted").isTrue();
}
@Test
@@ -133,13 +120,18 @@ public class TestHadoopTables {
TABLES.create(SCHEMA, spec, order, Maps.newHashMap(),
tableDir.toURI().toString());
SortOrder sortOrder = table.sortOrder();
- Assert.assertEquals("Order ID must match", 1, sortOrder.orderId());
- Assert.assertEquals("Order must have 1 field", 1,
sortOrder.fields().size());
- Assert.assertEquals("Direction must match ", ASC,
sortOrder.fields().get(0).direction());
- Assert.assertEquals(
- "Null order must match ", NULLS_FIRST,
sortOrder.fields().get(0).nullOrder());
+    Assertions.assertThat(sortOrder.orderId()).as("Order ID must match").isEqualTo(1);
+    Assertions.assertThat(sortOrder.fields()).as("Order must have 1 field").hasSize(1);
+ Assertions.assertThat(sortOrder.fields().get(0).direction())
+ .as("Direction must match")
+ .isEqualTo(ASC);
+ Assertions.assertThat(sortOrder.fields().get(0).nullOrder())
+ .as("Null order must match")
+ .isEqualTo(NULLS_FIRST);
Transform<?, ?> transform = Transforms.identity();
- Assert.assertEquals("Transform must match", transform,
sortOrder.fields().get(0).transform());
+ Assertions.assertThat(sortOrder.fields().get(0).transform())
+ .as("Transform must match")
+ .isEqualTo(transform);
}
@Test
@@ -149,10 +141,12 @@ public class TestHadoopTables {
TABLES.create(SCHEMA, spec, location);
Table table = TABLES.load(location);
- Assert.assertEquals("Name must match", location, table.name());
+    Assertions.assertThat(table.name()).as("Name must match").isEqualTo(location);
Table snapshotsTable = TABLES.load(location + "#snapshots");
- Assert.assertEquals("Name must match", location + "#snapshots",
snapshotsTable.name());
+ Assertions.assertThat(snapshotsTable.name())
+ .as("Name must match")
+ .isEqualTo(location + "#snapshots");
}
private static void createDummyTable(File tableDir, File dataDir) throws
IOException {
@@ -170,7 +164,7 @@ public class TestHadoopTables {
append.commit();
// Make sure that the data file and the manifest dir is created
- Assert.assertEquals(1, dataDir.listFiles().length);
- Assert.assertEquals(1, tableDir.listFiles().length);
+ Assertions.assertThat(dataDir.listFiles()).hasSize(1);
+ Assertions.assertThat(tableDir.listFiles()).hasSize(1);
}
}
diff --git a/core/src/test/java/org/apache/iceberg/hadoop/TestStaticTable.java
b/core/src/test/java/org/apache/iceberg/hadoop/TestStaticTable.java
index a0562be3eb..7cb57d72d3 100644
--- a/core/src/test/java/org/apache/iceberg/hadoop/TestStaticTable.java
+++ b/core/src/test/java/org/apache/iceberg/hadoop/TestStaticTable.java
@@ -22,10 +22,8 @@ import org.apache.iceberg.HasTableOperations;
import org.apache.iceberg.MetadataTableType;
import org.apache.iceberg.StaticTableOperations;
import org.apache.iceberg.Table;
-import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.assertj.core.api.Assertions;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
public class TestStaticTable extends HadoopTableTestBase {
@@ -41,9 +39,9 @@ public class TestStaticTable extends HadoopTableTestBase {
@Test
public void testLoadFromMetadata() {
Table staticTable = getStaticTable();
- Assert.assertTrue(
- "Loading a metadata file based table should return
StaticTableOperations",
- ((HasTableOperations) staticTable).operations() instanceof
StaticTableOperations);
+    Assertions.assertThat(((HasTableOperations) staticTable).operations())
+        .as("Loading a metadata file based table should return StaticTableOperations")
+        .isInstanceOf(StaticTableOperations.class);
}
@Test
@@ -88,13 +86,13 @@ public class TestStaticTable extends HadoopTableTestBase {
table.newAppend().appendFile(FILE_B).commit();
table.newOverwrite().deleteFile(FILE_B).addFile(FILE_C).commit();
Table staticTable = getStaticTable();
- Assert.assertTrue("Same history?",
table.history().containsAll(staticTable.history()));
- Assert.assertTrue(
- "Same snapshot?",
- table.currentSnapshot().snapshotId() ==
staticTable.currentSnapshot().snapshotId());
- Assert.assertTrue(
- "Same properties?",
- Maps.difference(table.properties(),
staticTable.properties()).areEqual());
+    Assertions.assertThat(table.history()).as("Same history?").containsAll(staticTable.history());
+ Assertions.assertThat(table.currentSnapshot().snapshotId())
+ .as("Same snapshot?")
+ .isEqualTo(staticTable.currentSnapshot().snapshotId());
+ Assertions.assertThat(table.properties())
+ .as("Same properties?")
+ .isEqualTo(staticTable.properties());
}
@Test
@@ -107,19 +105,18 @@ public class TestStaticTable extends HadoopTableTestBase {
table.newOverwrite().deleteFile(FILE_B).addFile(FILE_C).commit();
staticTable.refresh();
- Assert.assertEquals(
- "Snapshot unchanged after table modified",
- staticTable.currentSnapshot().snapshotId(),
- originalSnapshot);
+ Assertions.assertThat(staticTable.currentSnapshot().snapshotId())
+ .as("Snapshot unchanged after table modified")
+ .isEqualTo(originalSnapshot);
}
@Test
public void testMetadataTables() {
for (MetadataTableType type : MetadataTableType.values()) {
String enumName = type.name().replace("_", "").toLowerCase();
- Assert.assertTrue(
- "Should be able to get MetadataTable of type : " + type,
-
getStaticTable(type).getClass().getName().toLowerCase().contains(enumName));
+      Assertions.assertThat(getStaticTable(type).getClass().getName().toLowerCase())
+ .as("Should be able to get MetadataTable of type : " + type)
+ .contains(enumName);
}
}
}
diff --git
a/core/src/test/java/org/apache/iceberg/hadoop/TestTableSerialization.java
b/core/src/test/java/org/apache/iceberg/hadoop/TestTableSerialization.java
index 380ebcac6d..ed5b5361df 100644
--- a/core/src/test/java/org/apache/iceberg/hadoop/TestTableSerialization.java
+++ b/core/src/test/java/org/apache/iceberg/hadoop/TestTableSerialization.java
@@ -41,8 +41,8 @@ import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.relocated.com.google.common.collect.Sets;
import org.apache.iceberg.types.Types;
-import org.junit.Assert;
-import org.junit.Test;
+import org.assertj.core.api.Assertions;
+import org.junit.jupiter.api.Test;
public class TestTableSerialization extends HadoopTableTestBase {
@@ -112,10 +112,10 @@ public class TestTableSerialization extends
HadoopTableTestBase {
Set<CharSequence> deserializedFiles = getFiles(deserialized);
// Checks that the deserialized data stays the same
- Assert.assertEquals(expected, deserializedFiles);
+ Assertions.assertThat(deserializedFiles).isEqualTo(expected);
// We expect that the files changed in the meantime
- Assert.assertNotEquals(getFiles(table), deserializedFiles);
+ Assertions.assertThat(deserializedFiles).isNotEqualTo(getFiles(table));
}
@Test
@@ -143,13 +143,13 @@ public class TestTableSerialization extends
HadoopTableTestBase {
Set<CharSequence> deserializedFiles =
getFiles(deserializeFromBytes(serialized.get(type)));
// Checks that the deserialized data stays the same
- Assert.assertEquals(expected.get(type), deserializedFiles);
+ Assertions.assertThat(deserializedFiles).isEqualTo(expected.get(type));
// Collect the current data
Set<CharSequence> newFiles = getFiles(getMetaDataTable(table, type));
// Expect that the new data is changed in the meantime
- Assert.assertNotEquals(newFiles, deserializedFiles);
+ Assertions.assertThat(deserializedFiles).isNotEqualTo(newFiles);
}
}
diff --git
a/core/src/test/java/org/apache/iceberg/rest/RequestResponseTestBase.java
b/core/src/test/java/org/apache/iceberg/rest/RequestResponseTestBase.java
index cf5879f8ea..4855e9a6c7 100644
--- a/core/src/test/java/org/apache/iceberg/rest/RequestResponseTestBase.java
+++ b/core/src/test/java/org/apache/iceberg/rest/RequestResponseTestBase.java
@@ -25,8 +25,7 @@ import java.util.Collections;
import java.util.Set;
import org.apache.iceberg.relocated.com.google.common.collect.Sets;
import org.assertj.core.api.Assertions;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
public abstract class RequestResponseTestBase<T extends RESTMessage> {
@@ -76,11 +75,13 @@ public abstract class RequestResponseTestBase<T extends
RESTMessage> {
try {
JsonNode node = mapper().readValue(serialize(createExampleInstance()),
JsonNode.class);
for (String field : fieldsFromSpec) {
- Assert.assertTrue("Should have field: " + field, node.has(field));
+ Assertions.assertThat(node.has(field)).as("Should have field: %s",
field).isTrue();
}
for (String field : ((Iterable<? extends String>) node::fieldNames)) {
- Assert.assertTrue("Should not have field: " + field,
fieldsFromSpec.contains(field));
+ Assertions.assertThat(fieldsFromSpec)
+ .as("Should not have field: %s", field)
+ .contains(field);
}
} catch (JsonProcessingException e) {
throw new RuntimeException(e);
diff --git a/core/src/test/java/org/apache/iceberg/rest/TestHTTPClient.java
b/core/src/test/java/org/apache/iceberg/rest/TestHTTPClient.java
index 63f16cb2ea..e596df43e6 100644
--- a/core/src/test/java/org/apache/iceberg/rest/TestHTTPClient.java
+++ b/core/src/test/java/org/apache/iceberg/rest/TestHTTPClient.java
@@ -44,10 +44,9 @@ import
org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.rest.responses.ErrorResponse;
import org.apache.iceberg.rest.responses.ErrorResponseParser;
import org.assertj.core.api.Assertions;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
import org.mockserver.integration.ClientAndServer;
import org.mockserver.model.HttpRequest;
import org.mockserver.model.HttpResponse;
@@ -68,7 +67,7 @@ public class TestHTTPClient {
private static ClientAndServer mockServer;
private static RESTClient restClient;
- @BeforeClass
+ @BeforeAll
public static void beforeClass() {
mockServer = startClientAndServer(PORT);
restClient = HTTPClient.builder(ImmutableMap.of()).uri(URI).build();
@@ -76,7 +75,7 @@ public class TestHTTPClient {
icebergBuildFullVersion = IcebergBuild.fullVersion();
}
- @AfterClass
+ @AfterAll
public static void stopServer() throws IOException {
mockServer.stop();
restClient.close();
@@ -147,10 +146,9 @@ public class TestHTTPClient {
doExecuteRequest(method, path, body, onError, h ->
assertThat(h).isNotEmpty());
if (method.usesRequestBody()) {
- Assert.assertEquals(
- "On a successful " + method + ", the correct response body should be
returned",
- successResponse,
- body);
+ Assertions.assertThat(body)
+ .as("On a successful " + method + ", the correct response body
should be returned")
+ .isEqualTo(successResponse);
}
verify(onError, never()).accept(any());
diff --git a/core/src/test/java/org/apache/iceberg/rest/TestRESTCatalog.java
b/core/src/test/java/org/apache/iceberg/rest/TestRESTCatalog.java
index d7cf707903..4695ac1b0f 100644
--- a/core/src/test/java/org/apache/iceberg/rest/TestRESTCatalog.java
+++ b/core/src/test/java/org/apache/iceberg/rest/TestRESTCatalog.java
@@ -75,14 +75,13 @@ import org.apache.iceberg.rest.responses.ErrorResponse;
import org.apache.iceberg.rest.responses.LoadTableResponse;
import org.apache.iceberg.rest.responses.OAuthTokenResponse;
import org.apache.iceberg.types.Types;
+import org.assertj.core.api.Assertions;
import org.awaitility.Awaitility;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.handler.gzip.GzipHandler;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHolder;
-import org.junit.Assert;
import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
@@ -279,20 +278,17 @@ public class TestRESTCatalog extends
CatalogTests<RESTCatalog> {
restCat.setConf(new Configuration());
restCat.initialize("prod", initialConfig);
- Assert.assertEquals(
- "Catalog properties after initialize should use the server's override
properties",
- "false",
- restCat.properties().get(CatalogProperties.CACHE_ENABLED));
+    Assertions.assertThat(restCat.properties().get(CatalogProperties.CACHE_ENABLED))
+        .as("Catalog properties after initialize should use the server's override properties")
+        .isEqualTo("false");
- Assert.assertEquals(
- "Catalog after initialize should use the server's default properties
if not specified",
- "1",
- restCat.properties().get(CatalogProperties.CLIENT_POOL_SIZE));
+    Assertions.assertThat(restCat.properties().get(CatalogProperties.CLIENT_POOL_SIZE))
+        .as("Catalog after initialize should use the server's default properties if not specified")
+        .isEqualTo("1");
- Assert.assertEquals(
- "Catalog should return final warehouse location",
- "s3://bucket/warehouse",
- restCat.properties().get(CatalogProperties.WAREHOUSE_LOCATION));
+    Assertions.assertThat(restCat.properties().get(CatalogProperties.WAREHOUSE_LOCATION))
+        .as("Catalog should return final warehouse location")
+        .isEqualTo("s3://bucket/warehouse");
restCat.close();
}
@@ -323,7 +319,7 @@ public class TestRESTCatalog extends
CatalogTests<RESTCatalog> {
catalog.initialize(
"prod", ImmutableMap.of(CatalogProperties.URI, "ignored", "token",
"bearer-token"));
- Assertions.assertFalse(catalog.tableExists(TableIdentifier.of("ns",
"table")));
+ Assertions.assertThat(catalog.tableExists(TableIdentifier.of("ns",
"table"))).isFalse();
// the bearer token should be used for all interactions
Mockito.verify(adapter)
@@ -359,7 +355,7 @@ public class TestRESTCatalog extends
CatalogTests<RESTCatalog> {
catalog.initialize(
"prod", ImmutableMap.of(CatalogProperties.URI, "ignored",
"credential", "catalog:secret"));
- Assertions.assertFalse(catalog.tableExists(TableIdentifier.of("ns",
"table")));
+ Assertions.assertThat(catalog.tableExists(TableIdentifier.of("ns",
"table"))).isFalse();
// no token or credential for catalog token exchange
Mockito.verify(adapter)
@@ -412,7 +408,7 @@ public class TestRESTCatalog extends
CatalogTests<RESTCatalog> {
catalog.initialize(
"prod", ImmutableMap.of(CatalogProperties.URI, "ignored", "token",
"bearer-token"));
- Assertions.assertFalse(catalog.tableExists(TableIdentifier.of("ns",
"table")));
+ Assertions.assertThat(catalog.tableExists(TableIdentifier.of("ns",
"table"))).isFalse();
// use the bearer token for config
Mockito.verify(adapter)
@@ -467,7 +463,7 @@ public class TestRESTCatalog extends
CatalogTests<RESTCatalog> {
catalog.initialize(
"prod", ImmutableMap.of(CatalogProperties.URI, "ignored",
"credential", "catalog:secret"));
- Assertions.assertFalse(catalog.tableExists(TableIdentifier.of("ns",
"table")));
+ Assertions.assertThat(catalog.tableExists(TableIdentifier.of("ns",
"table"))).isFalse();
// call client credentials with no initial auth
Mockito.verify(adapter)
@@ -539,7 +535,7 @@ public class TestRESTCatalog extends
CatalogTests<RESTCatalog> {
"token",
"bearer-token"));
- Assertions.assertFalse(catalog.tableExists(TableIdentifier.of("ns",
"table")));
+ Assertions.assertThat(catalog.tableExists(TableIdentifier.of("ns",
"table"))).isFalse();
// use the bearer token for client credentials
Mockito.verify(adapter)
@@ -685,7 +681,7 @@ public class TestRESTCatalog extends
CatalogTests<RESTCatalog> {
catalog.initialize(
"prod", ImmutableMap.of(CatalogProperties.URI, "ignored", "token",
catalogToken));
- Assertions.assertFalse(catalog.tableExists(TableIdentifier.of("ns",
"table")));
+ Assertions.assertThat(catalog.tableExists(TableIdentifier.of("ns",
"table"))).isFalse();
Mockito.verify(adapter)
.execute(
@@ -1046,12 +1042,14 @@ public class TestRESTCatalog extends
CatalogTests<RESTCatalog> {
required(2, "data", Types.StringType.get()));
Table table = catalog.createTable(ident, expectedSchema);
- Assertions.assertEquals(
- expectedSchema.asStruct(), table.schema().asStruct(), "Schema should
match");
+ Assertions.assertThat(table.schema().asStruct())
+ .as("Schema should match")
+ .isEqualTo(expectedSchema.asStruct());
Table loaded = catalog.loadTable(ident); // the first load will send the
token
- Assertions.assertEquals(
- expectedSchema.asStruct(), loaded.schema().asStruct(), "Schema should
match");
+ Assertions.assertThat(loaded.schema().asStruct())
+ .as("Schema should match")
+ .isEqualTo(expectedSchema.asStruct());
loaded.refresh(); // refresh to force reload
@@ -1286,7 +1284,8 @@ public class TestRESTCatalog extends
CatalogTests<RESTCatalog> {
.untilAsserted(
() -> {
// use the exchanged catalog token
-
Assertions.assertFalse(catalog.tableExists(TableIdentifier.of("ns", "table")));
+
Assertions.assertThat(catalog.tableExists(TableIdentifier.of("ns", "table")))
+ .isFalse();
// call client credentials with no initial auth
Mockito.verify(adapter)
@@ -1435,7 +1434,7 @@ public class TestRESTCatalog extends
CatalogTests<RESTCatalog> {
catalog.initialize(
"prod", ImmutableMap.of(CatalogProperties.URI, "ignored",
"credential", credential));
- Assertions.assertFalse(catalog.tableExists(TableIdentifier.of("ns",
"table")));
+ Assertions.assertThat(catalog.tableExists(TableIdentifier.of("ns",
"table"))).isFalse();
// call client credentials with no initial auth
Mockito.verify(adapter)
@@ -1521,7 +1520,7 @@ public class TestRESTCatalog extends
CatalogTests<RESTCatalog> {
RESTCatalog catalog = new RESTCatalog(context, (config) -> adapter);
catalog.initialize("prod", ImmutableMap.of(CatalogProperties.URI,
"ignored", "token", token));
- Assertions.assertFalse(catalog.tableExists(TableIdentifier.of("ns",
"table")));
+ Assertions.assertThat(catalog.tableExists(TableIdentifier.of("ns",
"table"))).isFalse();
Mockito.verify(adapter)
.execute(
@@ -1614,7 +1613,8 @@ public class TestRESTCatalog extends
CatalogTests<RESTCatalog> {
.untilAsserted(
() -> {
// use the exchanged catalog token
-
Assertions.assertFalse(catalog.tableExists(TableIdentifier.of("ns", "table")));
+
Assertions.assertThat(catalog.tableExists(TableIdentifier.of("ns", "table")))
+ .isFalse();
// call client credentials with no initial auth
Mockito.verify(adapter)
diff --git a/core/src/test/java/org/apache/iceberg/rest/TestRESTUtil.java
b/core/src/test/java/org/apache/iceberg/rest/TestRESTUtil.java
index bf312e67df..680a8bcaa3 100644
--- a/core/src/test/java/org/apache/iceberg/rest/TestRESTUtil.java
+++ b/core/src/test/java/org/apache/iceberg/rest/TestRESTUtil.java
@@ -22,7 +22,7 @@ import java.util.Map;
import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
public class TestRESTUtil {
diff --git a/core/src/test/java/org/apache/iceberg/rest/TestResourcePaths.java
b/core/src/test/java/org/apache/iceberg/rest/TestResourcePaths.java
index dc1fbb0c9c..b2a5f5073e 100644
--- a/core/src/test/java/org/apache/iceberg/rest/TestResourcePaths.java
+++ b/core/src/test/java/org/apache/iceberg/rest/TestResourcePaths.java
@@ -21,8 +21,8 @@ package org.apache.iceberg.rest;
import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
-import org.junit.Assert;
-import org.junit.Test;
+import org.assertj.core.api.Assertions;
+import org.junit.jupiter.api.Test;
public class TestResourcePaths {
private final String prefix = "ws/catalog";
@@ -33,99 +33,106 @@ public class TestResourcePaths {
@Test
public void testConfigPath() {
// prefix does not affect the config route because config is merged into
catalog properties
- Assert.assertEquals(ResourcePaths.config(), "v1/config");
+ Assertions.assertThat(ResourcePaths.config()).isEqualTo("v1/config");
}
@Test
public void testNamespaces() {
- Assert.assertEquals("v1/ws/catalog/namespaces", withPrefix.namespaces());
- Assert.assertEquals("v1/namespaces", withoutPrefix.namespaces());
+    Assertions.assertThat(withPrefix.namespaces()).isEqualTo("v1/ws/catalog/namespaces");
+    Assertions.assertThat(withoutPrefix.namespaces()).isEqualTo("v1/namespaces");
}
@Test
public void testNamespace() {
Namespace ns = Namespace.of("ns");
- Assert.assertEquals("v1/ws/catalog/namespaces/ns",
withPrefix.namespace(ns));
- Assert.assertEquals("v1/namespaces/ns", withoutPrefix.namespace(ns));
+
Assertions.assertThat(withPrefix.namespace(ns)).isEqualTo("v1/ws/catalog/namespaces/ns");
+
Assertions.assertThat(withoutPrefix.namespace(ns)).isEqualTo("v1/namespaces/ns");
}
@Test
public void testNamespaceWithSlash() {
Namespace ns = Namespace.of("n/s");
- Assert.assertEquals("v1/ws/catalog/namespaces/n%2Fs",
withPrefix.namespace(ns));
- Assert.assertEquals("v1/namespaces/n%2Fs", withoutPrefix.namespace(ns));
+
Assertions.assertThat(withPrefix.namespace(ns)).isEqualTo("v1/ws/catalog/namespaces/n%2Fs");
+
Assertions.assertThat(withoutPrefix.namespace(ns)).isEqualTo("v1/namespaces/n%2Fs");
}
@Test
public void testNamespaceWithMultipartNamespace() {
Namespace ns = Namespace.of("n", "s");
- Assert.assertEquals("v1/ws/catalog/namespaces/n%1Fs",
withPrefix.namespace(ns));
- Assert.assertEquals("v1/namespaces/n%1Fs", withoutPrefix.namespace(ns));
+
Assertions.assertThat(withPrefix.namespace(ns)).isEqualTo("v1/ws/catalog/namespaces/n%1Fs");
+
Assertions.assertThat(withoutPrefix.namespace(ns)).isEqualTo("v1/namespaces/n%1Fs");
}
@Test
public void testNamespaceProperties() {
Namespace ns = Namespace.of("ns");
- Assert.assertEquals(
- "v1/ws/catalog/namespaces/ns/properties",
withPrefix.namespaceProperties(ns));
- Assert.assertEquals("v1/namespaces/ns/properties",
withoutPrefix.namespaceProperties(ns));
+ Assertions.assertThat(withPrefix.namespaceProperties(ns))
+ .isEqualTo("v1/ws/catalog/namespaces/ns/properties");
+ Assertions.assertThat(withoutPrefix.namespaceProperties(ns))
+ .isEqualTo("v1/namespaces/ns/properties");
}
@Test
public void testNamespacePropertiesWithSlash() {
Namespace ns = Namespace.of("n/s");
- Assert.assertEquals(
- "v1/ws/catalog/namespaces/n%2Fs/properties",
withPrefix.namespaceProperties(ns));
- Assert.assertEquals("v1/namespaces/n%2Fs/properties",
withoutPrefix.namespaceProperties(ns));
+ Assertions.assertThat(withPrefix.namespaceProperties(ns))
+ .isEqualTo("v1/ws/catalog/namespaces/n%2Fs/properties");
+ Assertions.assertThat(withoutPrefix.namespaceProperties(ns))
+ .isEqualTo("v1/namespaces/n%2Fs/properties");
}
@Test
public void testNamespacePropertiesWithMultipartNamespace() {
Namespace ns = Namespace.of("n", "s");
- Assert.assertEquals(
- "v1/ws/catalog/namespaces/n%1Fs/properties",
withPrefix.namespaceProperties(ns));
- Assert.assertEquals("v1/namespaces/n%1Fs/properties",
withoutPrefix.namespaceProperties(ns));
+ Assertions.assertThat(withPrefix.namespaceProperties(ns))
+ .isEqualTo("v1/ws/catalog/namespaces/n%1Fs/properties");
+ Assertions.assertThat(withoutPrefix.namespaceProperties(ns))
+ .isEqualTo("v1/namespaces/n%1Fs/properties");
}
@Test
public void testTables() {
Namespace ns = Namespace.of("ns");
- Assert.assertEquals("v1/ws/catalog/namespaces/ns/tables",
withPrefix.tables(ns));
- Assert.assertEquals("v1/namespaces/ns/tables", withoutPrefix.tables(ns));
+
Assertions.assertThat(withPrefix.tables(ns)).isEqualTo("v1/ws/catalog/namespaces/ns/tables");
+
Assertions.assertThat(withoutPrefix.tables(ns)).isEqualTo("v1/namespaces/ns/tables");
}
@Test
public void testTablesWithSlash() {
Namespace ns = Namespace.of("n/s");
- Assert.assertEquals("v1/ws/catalog/namespaces/n%2Fs/tables",
withPrefix.tables(ns));
- Assert.assertEquals("v1/namespaces/n%2Fs/tables",
withoutPrefix.tables(ns));
+
Assertions.assertThat(withPrefix.tables(ns)).isEqualTo("v1/ws/catalog/namespaces/n%2Fs/tables");
+
Assertions.assertThat(withoutPrefix.tables(ns)).isEqualTo("v1/namespaces/n%2Fs/tables");
}
@Test
public void testTablesWithMultipartNamespace() {
Namespace ns = Namespace.of("n", "s");
- Assert.assertEquals("v1/ws/catalog/namespaces/n%1Fs/tables",
withPrefix.tables(ns));
- Assert.assertEquals("v1/namespaces/n%1Fs/tables",
withoutPrefix.tables(ns));
+
Assertions.assertThat(withPrefix.tables(ns)).isEqualTo("v1/ws/catalog/namespaces/n%1Fs/tables");
+
Assertions.assertThat(withoutPrefix.tables(ns)).isEqualTo("v1/namespaces/n%1Fs/tables");
}
@Test
public void testTable() {
TableIdentifier ident = TableIdentifier.of("ns", "table");
- Assert.assertEquals("v1/ws/catalog/namespaces/ns/tables/table",
withPrefix.table(ident));
- Assert.assertEquals("v1/namespaces/ns/tables/table",
withoutPrefix.table(ident));
+ Assertions.assertThat(withPrefix.table(ident))
+ .isEqualTo("v1/ws/catalog/namespaces/ns/tables/table");
+
Assertions.assertThat(withoutPrefix.table(ident)).isEqualTo("v1/namespaces/ns/tables/table");
}
@Test
public void testTableWithSlash() {
TableIdentifier ident = TableIdentifier.of("n/s", "tab/le");
- Assert.assertEquals("v1/ws/catalog/namespaces/n%2Fs/tables/tab%2Fle",
withPrefix.table(ident));
- Assert.assertEquals("v1/namespaces/n%2Fs/tables/tab%2Fle",
withoutPrefix.table(ident));
+ Assertions.assertThat(withPrefix.table(ident))
+ .isEqualTo("v1/ws/catalog/namespaces/n%2Fs/tables/tab%2Fle");
+ Assertions.assertThat(withoutPrefix.table(ident))
+ .isEqualTo("v1/namespaces/n%2Fs/tables/tab%2Fle");
}
@Test
public void testTableWithMultipartNamespace() {
TableIdentifier ident = TableIdentifier.of("n", "s", "table");
- Assert.assertEquals("v1/ws/catalog/namespaces/n%1Fs/tables/table",
withPrefix.table(ident));
- Assert.assertEquals("v1/namespaces/n%1Fs/tables/table",
withoutPrefix.table(ident));
+ Assertions.assertThat(withPrefix.table(ident))
+ .isEqualTo("v1/ws/catalog/namespaces/n%1Fs/tables/table");
+
Assertions.assertThat(withoutPrefix.table(ident)).isEqualTo("v1/namespaces/n%1Fs/tables/table");
}
}