This is an automated email from the ASF dual-hosted git repository.

etudenhoefner pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/iceberg.git


The following commit(s) were added to refs/heads/master by this push:
     new 3caa3a28d0 Hive: Switch tests to JUnit5 (#8058)
3caa3a28d0 is described below

commit 3caa3a28d07a2d08b9a0e4196634126f1e016d6a
Author: Ashok <[email protected]>
AuthorDate: Sat Jul 15 14:57:09 2023 +0530

    Hive: Switch tests to JUnit5 (#8058)
---
 build.gradle                                       |   4 +
 .../iceberg/hive/HiveCreateReplaceTableTest.java   |  99 +++---
 .../org/apache/iceberg/hive/HiveMetastoreTest.java |   8 +-
 .../org/apache/iceberg/hive/HiveTableBaseTest.java |   8 +-
 .../org/apache/iceberg/hive/HiveTableTest.java     | 231 +++++++-------
 .../apache/iceberg/hive/TestCachedClientPool.java  |  74 ++---
 .../org/apache/iceberg/hive/TestHiveCatalog.java   | 338 +++++++++++----------
 .../apache/iceberg/hive/TestHiveClientPool.java    |  40 +--
 .../apache/iceberg/hive/TestHiveCommitLocks.java   |  57 ++--
 .../org/apache/iceberg/hive/TestHiveCommits.java   | 124 ++++----
 .../org/apache/iceberg/hive/TestHiveMetastore.java |   4 +-
 .../apache/iceberg/hive/TestHiveSchemaUtil.java    |  35 ++-
 .../iceberg/hive/TestHiveTableConcurrency.java     |  11 +-
 .../apache/iceberg/hive/TestLoadHiveCatalog.java   |  19 +-
 14 files changed, 537 insertions(+), 515 deletions(-)

diff --git a/build.gradle b/build.gradle
index 1a0803394d..187ebed52f 100644
--- a/build.gradle
+++ b/build.gradle
@@ -624,6 +624,10 @@ project(':iceberg-gcp') {
 }
 
 project(':iceberg-hive-metastore') {
+  test {
+    useJUnitPlatform()
+  }
+
   dependencies {
     implementation project(path: ':iceberg-bundled-guava', configuration: 
'shadow')
     implementation project(':iceberg-core')
diff --git 
a/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveCreateReplaceTableTest.java
 
b/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveCreateReplaceTableTest.java
index dd70795a0b..61d3659740 100644
--- 
a/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveCreateReplaceTableTest.java
+++ 
b/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveCreateReplaceTableTest.java
@@ -20,8 +20,11 @@ package org.apache.iceberg.hive;
 
 import static org.apache.iceberg.PartitionSpec.builderFor;
 import static org.apache.iceberg.types.Types.NestedField.required;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
 
 import java.io.IOException;
+import java.nio.file.Path;
 import org.apache.iceberg.AppendFiles;
 import org.apache.iceberg.DataFile;
 import org.apache.iceberg.DataFiles;
@@ -33,16 +36,12 @@ import org.apache.iceberg.Transaction;
 import org.apache.iceberg.catalog.TableIdentifier;
 import org.apache.iceberg.exceptions.AlreadyExistsException;
 import org.apache.iceberg.exceptions.NoSuchTableException;
-import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
 import org.apache.iceberg.relocated.com.google.common.collect.Maps;
 import org.apache.iceberg.types.Types;
-import org.assertj.core.api.Assertions;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 public class HiveCreateReplaceTableTest extends HiveMetastoreTest {
 
@@ -53,23 +52,23 @@ public class HiveCreateReplaceTableTest extends 
HiveMetastoreTest {
           required(3, "id", Types.IntegerType.get()), required(4, "data", 
Types.StringType.get()));
   private static final PartitionSpec SPEC = 
builderFor(SCHEMA).identity("id").build();
 
-  @Rule public TemporaryFolder temp = new TemporaryFolder();
+  @TempDir private Path temp;
 
   private String tableLocation;
 
-  @Before
+  @BeforeEach
   public void createTableLocation() throws IOException {
-    tableLocation = temp.newFolder("hive-").getPath();
+    tableLocation = temp.resolve("hive-").toString();
   }
 
-  @After
+  @AfterEach
   public void cleanup() {
     catalog.dropTable(TABLE_IDENTIFIER);
   }
 
   @Test
   public void testCreateTableTxn() {
-    Assert.assertFalse("Table should not exist", 
catalog.tableExists(TABLE_IDENTIFIER));
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should not 
exist").isFalse();
 
     Transaction txn =
         catalog.newCreateTableTransaction(
@@ -77,17 +76,17 @@ public class HiveCreateReplaceTableTest extends 
HiveMetastoreTest {
     txn.updateProperties().set("prop", "value").commit();
 
     // verify the table is still not visible before the transaction is 
committed
-    Assert.assertFalse(catalog.tableExists(TABLE_IDENTIFIER));
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).isFalse();
 
     txn.commitTransaction();
 
     Table table = catalog.loadTable(TABLE_IDENTIFIER);
-    Assert.assertEquals("Table props should match", "value", 
table.properties().get("prop"));
+    assertThat(table.properties()).as("Table props should 
match").containsEntry("prop", "value");
   }
 
   @Test
   public void testCreateTableTxnTableCreatedConcurrently() {
-    Assert.assertFalse("Table should not exist", 
catalog.tableExists(TABLE_IDENTIFIER));
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should not 
exist").isFalse();
 
     Transaction txn =
         catalog.newCreateTableTransaction(
@@ -95,16 +94,16 @@ public class HiveCreateReplaceTableTest extends 
HiveMetastoreTest {
 
     // create the table concurrently
     catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC);
-    Assert.assertTrue("Table should be created", 
catalog.tableExists(TABLE_IDENTIFIER));
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should be 
created").isTrue();
 
-    Assertions.assertThatThrownBy(txn::commitTransaction)
+    assertThatThrownBy(txn::commitTransaction)
         .isInstanceOf(AlreadyExistsException.class)
         .hasMessage("Table already exists: hivedb.tbl");
   }
 
   @Test
   public void testCreateTableTxnAndAppend() {
-    Assert.assertFalse("Table should not exist", 
catalog.tableExists(TABLE_IDENTIFIER));
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should not 
exist").isFalse();
 
     Transaction txn =
         catalog.newCreateTableTransaction(
@@ -123,19 +122,20 @@ public class HiveCreateReplaceTableTest extends 
HiveMetastoreTest {
 
     Table table = catalog.loadTable(TABLE_IDENTIFIER);
     Snapshot snapshot = table.currentSnapshot();
-    Assert.assertTrue(
-        "Table should have one manifest file", 
snapshot.allManifests(table.io()).size() == 1);
+    assertThat(snapshot.allManifests(table.io()))
+        .as("Table should have one manifest file")
+        .hasSize(1);
   }
 
   @Test
   public void testCreateTableTxnTableAlreadyExists() {
-    Assert.assertFalse("Table should not exist", 
catalog.tableExists(TABLE_IDENTIFIER));
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should not 
exist").isFalse();
 
     // create a table before starting a transaction
     catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC);
-    Assert.assertTrue("Table should be created", 
catalog.tableExists(TABLE_IDENTIFIER));
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should be 
created").isTrue();
 
-    Assertions.assertThatThrownBy(
+    assertThatThrownBy(
             () ->
                 catalog.newCreateTableTransaction(
                     TABLE_IDENTIFIER, SCHEMA, SPEC, tableLocation, 
Maps.newHashMap()))
@@ -146,7 +146,7 @@ public class HiveCreateReplaceTableTest extends 
HiveMetastoreTest {
   @Test
   public void testReplaceTableTxn() {
     catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC, tableLocation, 
Maps.newHashMap());
-    Assert.assertTrue("Table should exist", 
catalog.tableExists(TABLE_IDENTIFIER));
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should 
exist").isTrue();
 
     Transaction txn = catalog.newReplaceTableTransaction(TABLE_IDENTIFIER, 
SCHEMA, false);
     txn.commitTransaction();
@@ -154,12 +154,14 @@ public class HiveCreateReplaceTableTest extends 
HiveMetastoreTest {
     Table table = catalog.loadTable(TABLE_IDENTIFIER);
     PartitionSpec v1Expected =
         PartitionSpec.builderFor(table.schema()).alwaysNull("id", 
"id").withSpecId(1).build();
-    Assert.assertEquals("Table should have a spec with one void field", 
v1Expected, table.spec());
+    assertThat(table.spec())
+        .as("Table should have a spec with one void field")
+        .isEqualTo(v1Expected);
   }
 
   @Test
   public void testReplaceTableTxnTableNotExists() {
-    Assertions.assertThatThrownBy(
+    assertThatThrownBy(
             () -> catalog.newReplaceTableTransaction(TABLE_IDENTIFIER, SCHEMA, 
SPEC, false))
         .isInstanceOf(NoSuchTableException.class)
         .hasMessage("Table does not exist: hivedb.tbl");
@@ -168,7 +170,7 @@ public class HiveCreateReplaceTableTest extends 
HiveMetastoreTest {
   @Test
   public void testReplaceTableTxnTableDeletedConcurrently() {
     catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC, tableLocation, 
Maps.newHashMap());
-    Assert.assertTrue("Table should exist", 
catalog.tableExists(TABLE_IDENTIFIER));
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should 
exist").isTrue();
 
     Transaction txn = catalog.newReplaceTableTransaction(TABLE_IDENTIFIER, 
SCHEMA, SPEC, false);
 
@@ -176,7 +178,7 @@ public class HiveCreateReplaceTableTest extends 
HiveMetastoreTest {
 
     txn.updateProperties().set("prop", "value").commit();
 
-    Assertions.assertThatThrownBy(txn::commitTransaction)
+    assertThatThrownBy(txn::commitTransaction)
         .isInstanceOf(NoSuchTableException.class)
         .hasMessage("No such table: hivedb.tbl");
   }
@@ -185,7 +187,7 @@ public class HiveCreateReplaceTableTest extends 
HiveMetastoreTest {
   public void testReplaceTableTxnTableModifiedConcurrently() {
     Table table =
         catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC, tableLocation, 
Maps.newHashMap());
-    Assert.assertTrue("Table should exist", 
catalog.tableExists(TABLE_IDENTIFIER));
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should 
exist").isTrue();
 
     Transaction txn = catalog.newReplaceTableTransaction(TABLE_IDENTIFIER, 
SCHEMA, SPEC, false);
 
@@ -197,26 +199,28 @@ public class HiveCreateReplaceTableTest extends 
HiveMetastoreTest {
 
     // the replace should still succeed
     table = catalog.loadTable(TABLE_IDENTIFIER);
-    Assert.assertNull("Table props should be updated", 
table.properties().get("another-prop"));
-    Assert.assertEquals("Table props should match", "value", 
table.properties().get("prop"));
+    assertThat(table.properties())
+        .as("Table props should be updated")
+        .doesNotContainKey("another-prop")
+        .containsEntry("prop", "value");
   }
 
   @Test
   public void testCreateOrReplaceTableTxnTableNotExists() {
-    Assert.assertFalse("Table should not exist", 
catalog.tableExists(TABLE_IDENTIFIER));
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should not 
exist").isFalse();
 
     Transaction txn = catalog.newReplaceTableTransaction(TABLE_IDENTIFIER, 
SCHEMA, SPEC, true);
     txn.updateProperties().set("prop", "value").commit();
     txn.commitTransaction();
 
     Table table = catalog.loadTable(TABLE_IDENTIFIER);
-    Assert.assertEquals("Table props should match", "value", 
table.properties().get("prop"));
+    assertThat(table.properties()).as("Table props should 
match").containsEntry("prop", "value");
   }
 
   @Test
   public void testCreateOrReplaceTableTxnTableExists() {
     catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC, tableLocation, 
Maps.newHashMap());
-    Assert.assertTrue("Table should exist", 
catalog.tableExists(TABLE_IDENTIFIER));
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should 
exist").isTrue();
 
     Transaction txn = catalog.newReplaceTableTransaction(TABLE_IDENTIFIER, 
SCHEMA, true);
     txn.commitTransaction();
@@ -224,15 +228,16 @@ public class HiveCreateReplaceTableTest extends 
HiveMetastoreTest {
     Table table = catalog.loadTable(TABLE_IDENTIFIER);
     PartitionSpec v1Expected =
         PartitionSpec.builderFor(table.schema()).alwaysNull("id", 
"id").withSpecId(1).build();
-    Assert.assertEquals("Table should have a spec with one void field", 
v1Expected, table.spec());
+    assertThat(table.spec())
+        .as("Table should have a spec with one void field")
+        .isEqualTo(v1Expected);
   }
 
   @Test
   public void testCreateOrReplaceTableTxnTableDeletedConcurrently() {
-    Assert.assertFalse("Table should not exist", 
catalog.tableExists(TABLE_IDENTIFIER));
-
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should not 
exist").isFalse();
     catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC);
-    Assert.assertTrue("Table should be created", 
catalog.tableExists(TABLE_IDENTIFIER));
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should be 
created").isTrue();
 
     Transaction txn =
         catalog.newReplaceTableTransaction(
@@ -251,12 +256,12 @@ public class HiveCreateReplaceTableTest extends 
HiveMetastoreTest {
     txn.commitTransaction();
 
     Table table = catalog.loadTable(TABLE_IDENTIFIER);
-    Assert.assertEquals("Table props should match", "value", 
table.properties().get("prop"));
+    assertThat(table.properties()).as("Table props should 
match").containsEntry("prop", "value");
   }
 
   @Test
   public void testCreateOrReplaceTableTxnTableCreatedConcurrently() {
-    Assert.assertFalse("Table should not exist", 
catalog.tableExists(TABLE_IDENTIFIER));
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should not 
exist").isFalse();
 
     Transaction txn =
         catalog.newReplaceTableTransaction(
@@ -270,19 +275,21 @@ public class HiveCreateReplaceTableTest extends 
HiveMetastoreTest {
 
     // create the table concurrently
     catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC);
-    Assert.assertTrue("Table should be created", 
catalog.tableExists(TABLE_IDENTIFIER));
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should be 
created").isTrue();
 
     // expect the transaction to succeed anyway
     txn.commitTransaction();
 
     Table table = catalog.loadTable(TABLE_IDENTIFIER);
-    Assert.assertEquals("Partition spec should match", 
PartitionSpec.unpartitioned(), table.spec());
-    Assert.assertEquals("Table props should match", "value", 
table.properties().get("prop"));
+    assertThat(table.spec())
+        .as("Partition spec should match")
+        .isEqualTo(PartitionSpec.unpartitioned());
+    assertThat(table.properties()).as("Table props should 
match").containsEntry("prop", "value");
   }
 
   @Test
   public void testCreateTableTxnWithGlobalTableLocation() {
-    Assert.assertFalse("Table should not exist", 
catalog.tableExists(TABLE_IDENTIFIER));
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should not 
exist").isFalse();
 
     Transaction txn =
         catalog.newCreateTableTransaction(
@@ -300,6 +307,6 @@ public class HiveCreateReplaceTableTest extends 
HiveMetastoreTest {
 
     table.newAppend().appendFile(dataFile).commit();
 
-    Assert.assertEquals("Write should succeed", 1, 
Iterables.size(table.snapshots()));
+    assertThat(table.snapshots()).as("Write should succeed").hasSize(1);
   }
 }
diff --git 
a/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveMetastoreTest.java 
b/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveMetastoreTest.java
index 8c7adbc1f6..e48df0ce93 100644
--- 
a/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveMetastoreTest.java
+++ 
b/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveMetastoreTest.java
@@ -28,8 +28,8 @@ import org.apache.iceberg.CatalogProperties;
 import org.apache.iceberg.CatalogUtil;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
 import org.apache.iceberg.relocated.com.google.common.collect.Maps;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 public abstract class HiveMetastoreTest {
 
@@ -41,7 +41,7 @@ public abstract class HiveMetastoreTest {
   protected static HiveConf hiveConf;
   protected static TestHiveMetastore metastore;
 
-  @BeforeClass
+  @BeforeAll
   public static void startMetastore() throws Exception {
     startMetastore(Collections.emptyMap());
   }
@@ -72,7 +72,7 @@ public abstract class HiveMetastoreTest {
                 hiveConfWithOverrides);
   }
 
-  @AfterClass
+  @AfterAll
   public static void stopMetastore() throws Exception {
     HiveMetastoreTest.catalog = null;
 
diff --git 
a/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveTableBaseTest.java 
b/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveTableBaseTest.java
index b49c61192a..51f4b59532 100644
--- 
a/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveTableBaseTest.java
+++ 
b/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveTableBaseTest.java
@@ -34,8 +34,8 @@ import org.apache.iceberg.Schema;
 import org.apache.iceberg.TableMetadataParser;
 import org.apache.iceberg.catalog.TableIdentifier;
 import org.apache.iceberg.types.Types;
-import org.junit.After;
-import org.junit.Before;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 public class HiveTableBaseTest extends HiveMetastoreTest {
 
@@ -56,13 +56,13 @@ public class HiveTableBaseTest extends HiveMetastoreTest {
 
   private Path tableLocation;
 
-  @Before
+  @BeforeEach
   public void createTestTable() {
     this.tableLocation =
         new Path(catalog.createTable(TABLE_IDENTIFIER, schema, 
partitionSpec).location());
   }
 
-  @After
+  @AfterEach
   public void dropTestTable() throws Exception {
     // drop the table data
     tableLocation.getFileSystem(hiveConf).delete(tableLocation, true);
diff --git 
a/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveTableTest.java 
b/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveTableTest.java
index 5140563f7d..0b5edf21ae 100644
--- a/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveTableTest.java
+++ b/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveTableTest.java
@@ -28,9 +28,12 @@ import static 
org.apache.iceberg.BaseMetastoreTableOperations.TABLE_TYPE_PROP;
 import static org.apache.iceberg.TableMetadataParser.getFileExtension;
 import static org.apache.iceberg.types.Types.NestedField.optional;
 import static org.apache.iceberg.types.Types.NestedField.required;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.file.Path;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
@@ -74,16 +77,13 @@ import 
org.apache.iceberg.relocated.com.google.common.collect.Lists;
 import org.apache.iceberg.relocated.com.google.common.collect.Maps;
 import org.apache.iceberg.types.Types;
 import org.apache.thrift.TException;
-import org.assertj.core.api.Assertions;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 public class HiveTableTest extends HiveTableBaseTest {
   static final String NON_DEFAULT_DATABASE = "nondefault";
 
-  @Rule public TemporaryFolder tempFolder = new TemporaryFolder();
+  @TempDir private Path tempFolder;
 
   @Test
   public void testCreate() throws TException {
@@ -95,23 +95,23 @@ public class HiveTableTest extends HiveTableBaseTest {
 
     // check parameters are in expected state
     Map<String, String> parameters = table.getParameters();
-    Assert.assertNotNull(parameters);
-    
Assert.assertTrue(ICEBERG_TABLE_TYPE_VALUE.equalsIgnoreCase(parameters.get(TABLE_TYPE_PROP)));
-    Assert.assertTrue("EXTERNAL_TABLE".equalsIgnoreCase(table.getTableType()));
+    assertThat(parameters).isNotNull();
+    
assertThat(parameters.get(TABLE_TYPE_PROP)).isEqualToIgnoringCase(ICEBERG_TABLE_TYPE_VALUE);
+    assertThat(table.getTableType()).isEqualToIgnoringCase("EXTERNAL_TABLE");
 
     // Ensure the table is pointing to empty location
-    Assert.assertEquals(getTableLocation(tableName), 
table.getSd().getLocation());
+    
assertThat(table.getSd().getLocation()).isEqualTo(getTableLocation(tableName));
 
     // Ensure it is stored as unpartitioned table in hive.
-    Assert.assertEquals(0, table.getPartitionKeysSize());
+    assertThat(table.getPartitionKeysSize()).isEqualTo(0);
 
     // Only 1 snapshotFile Should exist and no manifests should exist
-    Assert.assertEquals(1, metadataVersionFiles(tableName).size());
-    Assert.assertEquals(0, manifestFiles(tableName).size());
+    assertThat(metadataVersionFiles(tableName)).hasSize(1);
+    assertThat(manifestFiles(tableName)).hasSize(0);
 
     final Table icebergTable = catalog.loadTable(TABLE_IDENTIFIER);
     // Iceberg schema should match the loaded table
-    Assert.assertEquals(schema.asStruct(), icebergTable.schema().asStruct());
+    assertThat(icebergTable.schema().asStruct()).isEqualTo(schema.asStruct());
   }
 
   @Test
@@ -122,25 +122,26 @@ public class HiveTableTest extends HiveTableBaseTest {
     Table original = catalog.loadTable(TABLE_IDENTIFIER);
 
     catalog.renameTable(TABLE_IDENTIFIER, renameTableIdentifier);
-    Assert.assertFalse(catalog.tableExists(TABLE_IDENTIFIER));
-    Assert.assertTrue(catalog.tableExists(renameTableIdentifier));
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).isFalse();
+    assertThat(catalog.tableExists(renameTableIdentifier)).isTrue();
 
     Table renamed = catalog.loadTable(renameTableIdentifier);
 
-    Assert.assertEquals(original.schema().asStruct(), 
renamed.schema().asStruct());
-    Assert.assertEquals(original.spec(), renamed.spec());
-    Assert.assertEquals(original.location(), renamed.location());
-    Assert.assertEquals(original.currentSnapshot(), renamed.currentSnapshot());
+    
assertThat(renamed.schema().asStruct()).isEqualTo(original.schema().asStruct());
+    assertThat(renamed.spec()).isEqualTo(original.spec());
+    assertThat(renamed.location()).isEqualTo(original.location());
+    
assertThat(renamed.currentSnapshot()).isEqualTo(original.currentSnapshot());
 
-    Assert.assertTrue(catalog.dropTable(renameTableIdentifier));
+    assertThat(catalog.dropTable(renameTableIdentifier)).isTrue();
   }
 
   @Test
   public void testDrop() {
-    Assert.assertTrue("Table should exist", 
catalog.tableExists(TABLE_IDENTIFIER));
-    Assert.assertTrue(
-        "Drop should return true and drop the table", 
catalog.dropTable(TABLE_IDENTIFIER));
-    Assert.assertFalse("Table should not exist", 
catalog.tableExists(TABLE_IDENTIFIER));
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should 
exist").isTrue();
+    assertThat(catalog.dropTable(TABLE_IDENTIFIER))
+        .as("Drop should return true and drop the table")
+        .isTrue();
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should not 
exist").isFalse();
   }
 
   @Test
@@ -152,13 +153,13 @@ public class HiveTableTest extends HiveTableBaseTest {
     String manifestListLocation =
         table.currentSnapshot().manifestListLocation().replace("file:", "");
 
-    Assert.assertTrue(
-        "Drop should return true and drop the table",
-        catalog.dropTable(TABLE_IDENTIFIER, false /* do not delete underlying 
files */));
-    Assert.assertFalse("Table should not exist", 
catalog.tableExists(TABLE_IDENTIFIER));
+    assertThat(catalog.dropTable(TABLE_IDENTIFIER, false /* do not delete 
underlying files */))
+        .as("Drop should return true and drop the table")
+        .isTrue();
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should not 
exist").isFalse();
 
-    Assert.assertTrue("Table data files should exist", new 
File(fileLocation).exists());
-    Assert.assertTrue("Table metadata files should exist", new 
File(manifestListLocation).exists());
+    assertThat(new File(fileLocation)).as("Table data files should 
exist").exists();
+    assertThat(new File(manifestListLocation)).as("Table metadata files should 
exist").exists();
   }
 
   @Test
@@ -214,29 +215,30 @@ public class HiveTableTest extends HiveTableBaseTest {
 
     List<ManifestFile> manifests = 
table.currentSnapshot().allManifests(table.io());
 
-    Assert.assertTrue(
-        "Drop (table and data) should return true and drop the table",
-        catalog.dropTable(TABLE_IDENTIFIER));
-    Assert.assertFalse("Table should not exist", 
catalog.tableExists(TABLE_IDENTIFIER));
+    assertThat(catalog.dropTable(TABLE_IDENTIFIER))
+        .as("Drop (table and data) should return true and drop the table")
+        .isTrue();
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should not 
exist").isFalse();
 
-    Assert.assertFalse("Table data files should not exist", new 
File(location1).exists());
-    Assert.assertFalse("Table data files should not exist", new 
File(location2).exists());
-    Assert.assertFalse(
-        "Table manifest list files should not exist", new 
File(manifestListLocation).exists());
+    assertThat(new File(location1)).as("Table data files should not 
exist").doesNotExist();
+    assertThat(new File(location2)).as("Table data files should not 
exist").doesNotExist();
+    assertThat(new File(manifestListLocation))
+        .as("Table manifest list files should not exist")
+        .doesNotExist();
     for (ManifestFile manifest : manifests) {
-      Assert.assertFalse(
-          "Table manifest files should not exist",
-          new File(manifest.path().replace("file:", "")).exists());
+      assertThat(new File(manifest.path().replace("file:", "")))
+          .as("Table manifest files should not exist")
+          .doesNotExist();
     }
-    Assert.assertFalse(
-        "Table metadata file should not exist",
-        new File(
+    assertThat(
+            new File(
                 ((HasTableOperations) table)
                     .operations()
                     .current()
                     .metadataFileLocation()
-                    .replace("file:", ""))
-            .exists());
+                    .replace("file:", "")))
+        .as("Table metadata file should not exist")
+        .doesNotExist();
   }
 
   @Test
@@ -248,9 +250,9 @@ public class HiveTableTest extends HiveTableBaseTest {
     icebergTable = catalog.loadTable(TABLE_IDENTIFIER);
 
     // Only 2 snapshotFile Should exist and no manifests should exist
-    Assert.assertEquals(2, metadataVersionFiles(TABLE_NAME).size());
-    Assert.assertEquals(0, manifestFiles(TABLE_NAME).size());
-    Assert.assertEquals(altered.asStruct(), icebergTable.schema().asStruct());
+    assertThat(metadataVersionFiles(TABLE_NAME)).hasSize(2);
+    assertThat(manifestFiles(TABLE_NAME)).hasSize(0);
+    assertThat(icebergTable.schema().asStruct()).isEqualTo(altered.asStruct());
 
     final org.apache.hadoop.hive.metastore.api.Table table =
         metastoreClient.getTable(DB_NAME, TABLE_NAME);
@@ -258,7 +260,7 @@ public class HiveTableTest extends HiveTableBaseTest {
         
table.getSd().getCols().stream().map(FieldSchema::getName).collect(Collectors.toList());
     final List<String> icebergColumns =
         
altered.columns().stream().map(Types.NestedField::name).collect(Collectors.toList());
-    Assert.assertEquals(icebergColumns, hiveColumns);
+    assertThat(hiveColumns).isEqualTo(icebergColumns);
   }
 
   @Test
@@ -285,10 +287,9 @@ public class HiveTableTest extends HiveTableBaseTest {
         .addColumn("int", Types.IntegerType.get())
         .commit();
 
-    Assert.assertEquals(
-        "Schema should match expected",
-        expectedSchema.asStruct(),
-        icebergTable.schema().asStruct());
+    assertThat(icebergTable.schema().asStruct())
+        .as("Schema should match expected")
+        .isEqualTo(expectedSchema.asStruct());
 
     expectedSchema =
         new Schema(
@@ -299,10 +300,9 @@ public class HiveTableTest extends HiveTableBaseTest {
                 .fields());
     icebergTable.updateSchema().deleteColumn("string").commit();
 
-    Assert.assertEquals(
-        "Schema should match expected",
-        expectedSchema.asStruct(),
-        icebergTable.schema().asStruct());
+    assertThat(icebergTable.schema().asStruct())
+        .as("Schema should match expected")
+        .isEqualTo(expectedSchema.asStruct());
   }
 
   @Test
@@ -313,7 +313,7 @@ public class HiveTableTest extends HiveTableBaseTest {
     String dummyLocation = "dummylocation";
     table.getParameters().put(METADATA_LOCATION_PROP, dummyLocation);
     metastoreClient.alter_table(DB_NAME, TABLE_NAME, table);
-    Assertions.assertThatThrownBy(
+    assertThatThrownBy(
             () -> icebergTable.updateSchema().addColumn("data", 
Types.LongType.get()).commit())
         .isInstanceOf(CommitFailedException.class)
         .hasMessageContaining("is not same as the current table metadata 
location 'dummylocation'");
@@ -327,8 +327,8 @@ public class HiveTableTest extends HiveTableBaseTest {
             .filter(t -> t.namespace().level(0).equals(DB_NAME) && 
t.name().equals(TABLE_NAME))
             .collect(Collectors.toList());
 
-    Assert.assertEquals(1, expectedIdents.size());
-    Assert.assertTrue(catalog.tableExists(TABLE_IDENTIFIER));
+    assertThat(expectedIdents).hasSize(1);
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).isTrue();
 
     // create a hive table
     String hiveTableName = "test_hive_table";
@@ -337,13 +337,13 @@ public class HiveTableTest extends HiveTableBaseTest {
 
     catalog.setListAllTables(false);
     List<TableIdentifier> tableIdents1 = 
catalog.listTables(TABLE_IDENTIFIER.namespace());
-    Assert.assertEquals("should only 1 iceberg table .", 1, 
tableIdents1.size());
+    assertThat(tableIdents1).as("should only 1 iceberg table .").hasSize(1);
 
     catalog.setListAllTables(true);
     List<TableIdentifier> tableIdents2 = 
catalog.listTables(TABLE_IDENTIFIER.namespace());
-    Assert.assertEquals("should be 2 tables in namespace .", 2, 
tableIdents2.size());
+    assertThat(tableIdents2).as("should be 2 tables in namespace 
.").hasSize(2);
 
-    Assert.assertTrue(catalog.tableExists(TABLE_IDENTIFIER));
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).isTrue();
     metastoreClient.dropTable(DB_NAME, hiveTableName);
   }
 
@@ -362,7 +362,7 @@ public class HiveTableTest extends HiveTableBaseTest {
     StorageDescriptor sd =
         new StorageDescriptor(
             Lists.newArrayList(),
-            tempFolder.newFolder().getAbsolutePath(),
+            tempFolder.toAbsolutePath().toString(),
             "org.apache.hadoop.mapred.TextInputFormat",
             "org.apache.hadoop.mapred.TextOutputFormat",
             false,
@@ -400,14 +400,14 @@ public class HiveTableTest extends HiveTableBaseTest {
         namespace, Collections.singletonMap("location", 
nonDefaultLocation.getPath()));
     Map<String, String> namespaceMeta = 
catalog.loadNamespaceMetadata(namespace);
     // Make sure that we are testing a namespace with a non default location :)
-    Assert.assertEquals(namespaceMeta.get("location"), "file:" + 
nonDefaultLocation.getPath());
+    assertThat("file:" + 
nonDefaultLocation.getPath()).isEqualTo(namespaceMeta.get("location"));
 
     TableIdentifier tableIdentifier = TableIdentifier.of(namespace, 
TABLE_NAME);
     catalog.createTable(tableIdentifier, schema);
 
     // Let's check the location loaded through the catalog
     Table table = catalog.loadTable(tableIdentifier);
-    Assert.assertEquals(namespaceMeta.get("location") + "/" + TABLE_NAME, 
table.location());
+    assertThat(table.location()).isEqualTo(namespaceMeta.get("location") + "/" 
+ TABLE_NAME);
 
     // Drop the database and purge the files
     metastoreClient.dropDatabase(NON_DEFAULT_DATABASE, true, true, true);
@@ -419,16 +419,15 @@ public class HiveTableTest extends HiveTableBaseTest {
         metastoreClient.getTable(DB_NAME, TABLE_NAME);
 
     Map<String, String> originalParams = originalTable.getParameters();
-    Assert.assertNotNull(originalParams);
-    Assert.assertTrue(
-        
ICEBERG_TABLE_TYPE_VALUE.equalsIgnoreCase(originalParams.get(TABLE_TYPE_PROP)));
-    
Assert.assertTrue("EXTERNAL_TABLE".equalsIgnoreCase(originalTable.getTableType()));
+    assertThat(originalParams).isNotNull();
+    
assertThat(originalParams.get(TABLE_TYPE_PROP)).isEqualToIgnoringCase(ICEBERG_TABLE_TYPE_VALUE);
+    
assertThat(originalTable.getTableType()).isEqualToIgnoringCase("EXTERNAL_TABLE");
 
     catalog.dropTable(TABLE_IDENTIFIER, false);
-    Assert.assertFalse(catalog.tableExists(TABLE_IDENTIFIER));
+    assertThat(catalog.tableExists(TABLE_IDENTIFIER)).isFalse();
 
     List<String> metadataVersionFiles = metadataVersionFiles(TABLE_NAME);
-    Assert.assertEquals(1, metadataVersionFiles.size());
+    assertThat(metadataVersionFiles).hasSize(1);
 
     catalog.registerTable(TABLE_IDENTIFIER, "file:" + 
metadataVersionFiles.get(0));
 
@@ -436,18 +435,17 @@ public class HiveTableTest extends HiveTableBaseTest {
         metastoreClient.getTable(DB_NAME, TABLE_NAME);
 
     Map<String, String> newTableParameters = newTable.getParameters();
-    Assert.assertNull(newTableParameters.get(PREVIOUS_METADATA_LOCATION_PROP));
-    Assert.assertEquals(
-        originalParams.get(TABLE_TYPE_PROP), 
newTableParameters.get(TABLE_TYPE_PROP));
-    Assert.assertEquals(
-        originalParams.get(METADATA_LOCATION_PROP), 
newTableParameters.get(METADATA_LOCATION_PROP));
-    Assert.assertEquals(originalTable.getSd(), newTable.getSd());
+    assertThat(newTableParameters)
+        .doesNotContainKey(PREVIOUS_METADATA_LOCATION_PROP)
+        .containsEntry(TABLE_TYPE_PROP, originalParams.get(TABLE_TYPE_PROP))
+        .containsEntry(METADATA_LOCATION_PROP, 
originalParams.get(METADATA_LOCATION_PROP));
+    assertThat(newTable.getSd()).isEqualTo(originalTable.getSd());
   }
 
   @Test
   public void testRegisterHadoopTableToHiveCatalog() throws IOException, 
TException {
     // create a hadoop catalog
-    String tableLocation = tempFolder.newFolder().toString();
+    String tableLocation = tempFolder.toString();
     HadoopCatalog hadoopCatalog = new HadoopCatalog(new Configuration(), 
tableLocation);
     // create table using hadoop catalog
     TableIdentifier identifier = TableIdentifier.of(DB_NAME, "table1");
@@ -457,8 +455,8 @@ public class HiveTableTest extends HiveTableBaseTest {
     // insert some data
     String file1Location = appendData(table, "file1");
     List<FileScanTask> tasks = Lists.newArrayList(table.newScan().planFiles());
-    Assert.assertEquals("Should scan 1 file", 1, tasks.size());
-    Assert.assertEquals(tasks.get(0).file().path(), file1Location);
+    assertThat(tasks).as("Should scan 1 file").hasSize(1);
+    assertThat(file1Location).isEqualTo(tasks.get(0).file().path());
 
     // collect metadata file
     List<String> metadataFiles =
@@ -466,31 +464,31 @@ public class HiveTableTest extends HiveTableBaseTest {
             .map(File::getAbsolutePath)
             .filter(f -> 
f.endsWith(getFileExtension(TableMetadataParser.Codec.NONE)))
             .collect(Collectors.toList());
-    Assert.assertEquals(2, metadataFiles.size());
+    assertThat(metadataFiles).hasSize(2);
 
-    Assertions.assertThatThrownBy(() -> metastoreClient.getTable(DB_NAME, 
"table1"))
+    assertThatThrownBy(() -> metastoreClient.getTable(DB_NAME, "table1"))
         .isInstanceOf(NoSuchObjectException.class)
         .hasMessage("hivedb.table1 table not found");
-    Assertions.assertThatThrownBy(() -> catalog.loadTable(identifier))
+    assertThatThrownBy(() -> catalog.loadTable(identifier))
         .isInstanceOf(NoSuchTableException.class)
         .hasMessage("Table does not exist: hivedb.table1");
 
     // register the table to hive catalog using the latest metadata file
     String latestMetadataFile = ((BaseTable) 
table).operations().current().metadataFileLocation();
     catalog.registerTable(identifier, "file:" + latestMetadataFile);
-    Assert.assertNotNull(metastoreClient.getTable(DB_NAME, "table1"));
+    assertThat(metastoreClient.getTable(DB_NAME, "table1")).isNotNull();
 
     // load the table in hive catalog
     table = catalog.loadTable(identifier);
-    Assert.assertNotNull(table);
+    assertThat(table).isNotNull();
 
     // insert some data
     String file2Location = appendData(table, "file2");
     tasks = Lists.newArrayList(table.newScan().planFiles());
-    Assert.assertEquals("Should scan 2 files", 2, tasks.size());
+    assertThat(tasks).as("Should scan 2 files").hasSize(2);
     Set<String> files =
         tasks.stream().map(task -> 
task.file().path().toString()).collect(Collectors.toSet());
-    Assert.assertTrue(files.contains(file1Location) && 
files.contains(file2Location));
+    assertThat(files).contains(file1Location, file2Location);
   }
 
   private String appendData(Table table, String fileName) throws IOException {
@@ -528,16 +526,15 @@ public class HiveTableTest extends HiveTableBaseTest {
         metastoreClient.getTable(DB_NAME, TABLE_NAME);
 
     Map<String, String> originalParams = originalTable.getParameters();
-    Assert.assertNotNull(originalParams);
-    Assert.assertTrue(
-        
ICEBERG_TABLE_TYPE_VALUE.equalsIgnoreCase(originalParams.get(TABLE_TYPE_PROP)));
-    
Assert.assertTrue("EXTERNAL_TABLE".equalsIgnoreCase(originalTable.getTableType()));
+    assertThat(originalParams).isNotNull();
+    
assertThat(originalParams.get(TABLE_TYPE_PROP)).isEqualToIgnoringCase(ICEBERG_TABLE_TYPE_VALUE);
+    
assertThat(originalTable.getTableType()).isEqualToIgnoringCase("EXTERNAL_TABLE");
 
     List<String> metadataVersionFiles = metadataVersionFiles(TABLE_NAME);
-    Assert.assertEquals(1, metadataVersionFiles.size());
+    assertThat(metadataVersionFiles).hasSize(1);
 
     // Try to register an existing table
-    Assertions.assertThatThrownBy(
+    assertThatThrownBy(
             () -> catalog.registerTable(TABLE_IDENTIFIER, "file:" + 
metadataVersionFiles.get(0)))
         .isInstanceOf(AlreadyExistsException.class)
         .hasMessage("Table already exists: hivedb.tbl");
@@ -618,35 +615,35 @@ public class HiveTableTest extends HiveTableBaseTest {
     File realLocation = new File(metadataLocation(TABLE_NAME));
     File fakeLocation = new File(metadataLocation(TABLE_NAME) + "_dummy");
 
-    Assert.assertTrue(realLocation.renameTo(fakeLocation));
-    Assertions.assertThatThrownBy(() -> catalog.loadTable(TABLE_IDENTIFIER))
+    assertThat(realLocation.renameTo(fakeLocation)).isTrue();
+    assertThatThrownBy(() -> catalog.loadTable(TABLE_IDENTIFIER))
         .isInstanceOf(NotFoundException.class)
         .hasMessageStartingWith("Failed to open input stream for file");
-    Assert.assertTrue(fakeLocation.renameTo(realLocation));
+    assertThat(fakeLocation.renameTo(realLocation)).isTrue();
   }
 
   private void assertHiveEnabled(
       org.apache.hadoop.hive.metastore.api.Table hmsTable, boolean expected) {
     if (expected) {
-      Assert.assertEquals(
-          "org.apache.iceberg.mr.hive.HiveIcebergStorageHandler",
-          
hmsTable.getParameters().get(hive_metastoreConstants.META_TABLE_STORAGE));
-      Assert.assertEquals(
-          "org.apache.iceberg.mr.hive.HiveIcebergSerDe",
-          hmsTable.getSd().getSerdeInfo().getSerializationLib());
-      Assert.assertEquals(
-          "org.apache.iceberg.mr.hive.HiveIcebergInputFormat", 
hmsTable.getSd().getInputFormat());
-      Assert.assertEquals(
-          "org.apache.iceberg.mr.hive.HiveIcebergOutputFormat", 
hmsTable.getSd().getOutputFormat());
+      assertThat(hmsTable.getParameters())
+          .containsEntry(
+              hive_metastoreConstants.META_TABLE_STORAGE,
+              "org.apache.iceberg.mr.hive.HiveIcebergStorageHandler");
+      assertThat(hmsTable.getSd().getSerdeInfo().getSerializationLib())
+          .isEqualTo("org.apache.iceberg.mr.hive.HiveIcebergSerDe");
+      assertThat(hmsTable.getSd().getInputFormat())
+          .isEqualTo("org.apache.iceberg.mr.hive.HiveIcebergInputFormat");
+      assertThat(hmsTable.getSd().getOutputFormat())
+          .isEqualTo("org.apache.iceberg.mr.hive.HiveIcebergOutputFormat");
     } else {
-      
Assert.assertNull(hmsTable.getParameters().get(hive_metastoreConstants.META_TABLE_STORAGE));
-      Assert.assertEquals(
-          "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
-          hmsTable.getSd().getSerdeInfo().getSerializationLib());
-      Assert.assertEquals(
-          "org.apache.hadoop.mapred.FileInputFormat", 
hmsTable.getSd().getInputFormat());
-      Assert.assertEquals(
-          "org.apache.hadoop.mapred.FileOutputFormat", 
hmsTable.getSd().getOutputFormat());
+      assertThat(hmsTable.getParameters())
+          .doesNotContainKey(hive_metastoreConstants.META_TABLE_STORAGE);
+      assertThat(hmsTable.getSd().getSerdeInfo().getSerializationLib())
+          .isEqualTo("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
+      assertThat(hmsTable.getSd().getInputFormat())
+          .isEqualTo("org.apache.hadoop.mapred.FileInputFormat");
+      assertThat(hmsTable.getSd().getOutputFormat())
+          .isEqualTo("org.apache.hadoop.mapred.FileOutputFormat");
     }
   }
 }
diff --git 
a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestCachedClientPool.java
 
b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestCachedClientPool.java
index 2c2d256a45..19b9b0effb 100644
--- 
a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestCachedClientPool.java
+++ 
b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestCachedClientPool.java
@@ -20,6 +20,8 @@ package org.apache.iceberg.hive;
 
 import static org.apache.iceberg.CatalogUtil.ICEBERG_CATALOG_TYPE;
 import static org.apache.iceberg.CatalogUtil.ICEBERG_CATALOG_TYPE_HIVE;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
 
 import java.security.PrivilegedAction;
 import java.util.Collections;
@@ -31,9 +33,7 @@ import org.apache.iceberg.CatalogUtil;
 import org.apache.iceberg.exceptions.ValidationException;
 import org.apache.iceberg.hive.CachedClientPool.Key;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
-import org.assertj.core.api.Assertions;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestCachedClientPool extends HiveMetastoreTest {
 
@@ -41,21 +41,22 @@ public class TestCachedClientPool extends HiveMetastoreTest 
{
   public void testClientPoolCleaner() throws InterruptedException {
     CachedClientPool clientPool = new CachedClientPool(hiveConf, 
Collections.emptyMap());
     HiveClientPool clientPool1 = clientPool.clientPool();
-    Assertions.assertThat(
+    assertThat(clientPool1)
+        .isSameAs(
             CachedClientPool.clientPoolCache()
-                .getIfPresent(CachedClientPool.extractKey(null, hiveConf)))
-        .isSameAs(clientPool1);
+                .getIfPresent(CachedClientPool.extractKey(null, hiveConf)));
     TimeUnit.MILLISECONDS.sleep(EVICTION_INTERVAL - 
TimeUnit.SECONDS.toMillis(2));
     HiveClientPool clientPool2 = clientPool.clientPool();
-    Assert.assertSame(clientPool1, clientPool2);
+    assertThat(clientPool2).isSameAs(clientPool1);
     TimeUnit.MILLISECONDS.sleep(EVICTION_INTERVAL + 
TimeUnit.SECONDS.toMillis(5));
-    Assert.assertNull(
-        CachedClientPool.clientPoolCache()
-            .getIfPresent(CachedClientPool.extractKey(null, hiveConf)));
+    assertThat(
+            CachedClientPool.clientPoolCache()
+                .getIfPresent(CachedClientPool.extractKey(null, hiveConf)))
+        .isNull();
 
     // The client has been really closed.
-    Assert.assertTrue(clientPool1.isClosed());
-    Assert.assertTrue(clientPool2.isClosed());
+    assertThat(clientPool1.isClosed()).isTrue();
+    assertThat(clientPool2.isClosed()).isTrue();
   }
 
   @Test
@@ -73,19 +74,22 @@ public class TestCachedClientPool extends HiveMetastoreTest 
{
         foo2.doAs(
             (PrivilegedAction<Key>)
                 () -> CachedClientPool.extractKey("conf:key1,user_name", 
hiveConf));
-    Assert.assertEquals("Key elements order shouldn't matter", key1, key2);
+    assertThat(key2).as("Key elements order shouldn't matter").isEqualTo(key1);
 
     key1 = foo1.doAs((PrivilegedAction<Key>) () -> 
CachedClientPool.extractKey("ugi", hiveConf));
     key2 = bar.doAs((PrivilegedAction<Key>) () -> 
CachedClientPool.extractKey("ugi", hiveConf));
-    Assert.assertNotEquals("Different users are not supposed to be 
equivalent", key1, key2);
+    assertThat(key2).as("Different users are not supposed to be 
equivalent").isNotEqualTo(key1);
 
     key2 = foo2.doAs((PrivilegedAction<Key>) () -> 
CachedClientPool.extractKey("ugi", hiveConf));
-    Assert.assertNotEquals("Different UGI instances are not supposed to be 
equivalent", key1, key2);
+    assertThat(key2)
+        .as("Different UGI instances are not supposed to be equivalent")
+        .isNotEqualTo(key1);
 
     key1 = CachedClientPool.extractKey("ugi", hiveConf);
     key2 = CachedClientPool.extractKey("ugi,conf:key1", hiveConf);
-    Assert.assertNotEquals(
-        "Keys with different number of elements are not supposed to be 
equivalent", key1, key2);
+    assertThat(key2)
+        .as("Keys with different number of elements are not supposed to be 
equivalent")
+        .isNotEqualTo(key1);
 
     Configuration conf1 = new Configuration(hiveConf);
     Configuration conf2 = new Configuration(hiveConf);
@@ -93,31 +97,33 @@ public class TestCachedClientPool extends HiveMetastoreTest 
{
     conf1.set("key1", "val");
     key1 = CachedClientPool.extractKey("conf:key1", conf1);
     key2 = CachedClientPool.extractKey("conf:key1", conf2);
-    Assert.assertNotEquals(
-        "Config with different values are not supposed to be equivalent", 
key1, key2);
+    assertThat(key2)
+        .as("Config with different values are not supposed to be equivalent")
+        .isNotEqualTo(key1);
 
     conf2.set("key1", "val");
     conf2.set("key2", "val");
     key2 = CachedClientPool.extractKey("conf:key2", conf2);
-    Assert.assertNotEquals(
-        "Config with different keys are not supposed to be equivalent", key1, 
key2);
+    assertThat(key2)
+        .as("Config with different keys are not supposed to be equivalent")
+        .isNotEqualTo(key1);
 
     key1 = CachedClientPool.extractKey("conf:key1,ugi", conf1);
     key2 = CachedClientPool.extractKey("ugi,conf:key1", conf2);
-    Assert.assertEquals("Config with same key/value should be equivalent", 
key1, key2);
+    assertThat(key2).as("Config with same key/value should be 
equivalent").isEqualTo(key1);
 
     conf1.set("key2", "val");
     key1 = CachedClientPool.extractKey("conf:key2 ,conf:key1", conf1);
     key2 = CachedClientPool.extractKey("conf:key2,conf:key1", conf2);
-    Assert.assertEquals("Config with same key/value should be equivalent", 
key1, key2);
+    assertThat(key2).as("Config with same key/value should be 
equivalent").isEqualTo(key1);
 
-    Assertions.assertThatThrownBy(
+    assertThatThrownBy(
             () -> CachedClientPool.extractKey("ugi,ugi", hiveConf),
             "Duplicate key elements should result in an error")
         .isInstanceOf(ValidationException.class)
         .hasMessageContaining("UGI key element already specified");
 
-    Assertions.assertThatThrownBy(
+    assertThatThrownBy(
             () -> CachedClientPool.extractKey("conf:k1,conf:k2,CONF:k1", 
hiveConf),
             "Duplicate conf key elements should result in an error")
         .isInstanceOf(ValidationException.class)
@@ -153,16 +159,16 @@ public class TestCachedClientPool extends 
HiveMetastoreTest {
     HiveClientPool pool3 = ((CachedClientPool) 
catalog3.clientPool()).clientPool();
     HiveClientPool pool4 = ((CachedClientPool) 
catalog4.clientPool()).clientPool();
 
-    Assert.assertSame(pool1, pool2);
-    Assert.assertNotSame(pool3, pool1);
-    Assert.assertNotSame(pool3, pool2);
-    Assert.assertNotSame(pool3, pool4);
-    Assert.assertNotSame(pool4, pool1);
-    Assert.assertNotSame(pool4, pool2);
+    assertThat(pool2).isSameAs(pool1);
+    assertThat(pool1).isNotSameAs(pool3);
+    assertThat(pool2).isNotSameAs(pool3);
+    assertThat(pool4).isNotSameAs(pool3);
+    assertThat(pool1).isNotSameAs(pool4);
+    assertThat(pool2).isNotSameAs(pool4);
 
-    Assert.assertEquals("foo", 
pool1.hiveConf().get(HiveCatalog.HIVE_CONF_CATALOG));
-    Assert.assertEquals("bar", 
pool3.hiveConf().get(HiveCatalog.HIVE_CONF_CATALOG));
-    Assert.assertNull(pool4.hiveConf().get(HiveCatalog.HIVE_CONF_CATALOG));
+    
assertThat(pool1.hiveConf().get(HiveCatalog.HIVE_CONF_CATALOG)).isEqualTo("foo");
+    
assertThat(pool3.hiveConf().get(HiveCatalog.HIVE_CONF_CATALOG)).isEqualTo("bar");
+    assertThat(pool4.hiveConf().get(HiveCatalog.HIVE_CONF_CATALOG)).isNull();
 
     pool1.close();
     pool3.close();
diff --git 
a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java 
b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java
index c60740c854..d4ac498684 100644
--- a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java
+++ b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java
@@ -26,14 +26,17 @@ import static 
org.apache.iceberg.TableProperties.CURRENT_SNAPSHOT_SUMMARY;
 import static org.apache.iceberg.TableProperties.CURRENT_SNAPSHOT_TIMESTAMP;
 import static org.apache.iceberg.TableProperties.DEFAULT_PARTITION_SPEC;
 import static org.apache.iceberg.TableProperties.DEFAULT_SORT_ORDER;
+import static org.apache.iceberg.TableProperties.SNAPSHOT_COUNT;
 import static org.apache.iceberg.expressions.Expressions.bucket;
 import static org.apache.iceberg.types.Types.NestedField.required;
 import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatNoException;
 import static org.assertj.core.api.Assertions.assertThatThrownBy;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 import java.io.IOException;
+import java.nio.file.Path;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -79,11 +82,8 @@ import org.apache.iceberg.transforms.Transforms;
 import org.apache.iceberg.types.Types;
 import org.apache.iceberg.util.JsonUtil;
 import org.apache.thrift.TException;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.jupiter.api.Assertions;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 public class TestHiveCatalog extends HiveMetastoreTest {
   private static ImmutableMap meta =
@@ -92,7 +92,7 @@ public class TestHiveCatalog extends HiveMetastoreTest {
           "group", "iceberg",
           "comment", "iceberg  hiveCatalog test");
 
-  @Rule public TemporaryFolder temp = new TemporaryFolder();
+  @TempDir private Path temp;
 
   private Schema getTestSchema() {
     return new Schema(
@@ -105,7 +105,7 @@ public class TestHiveCatalog extends HiveMetastoreTest {
     Schema schema = getTestSchema();
     PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 
16).build();
     TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
-    String location = temp.newFolder("tbl").toString();
+    String location = temp.resolve("tbl").toString();
 
     try {
       Table table =
@@ -117,11 +117,11 @@ public class TestHiveCatalog extends HiveMetastoreTest {
               .withProperty("key2", "value2")
               .create();
 
-      Assert.assertEquals(location, table.location());
-      Assert.assertEquals(2, table.schema().columns().size());
-      Assert.assertEquals(1, table.spec().fields().size());
-      Assert.assertEquals("value1", table.properties().get("key1"));
-      Assert.assertEquals("value2", table.properties().get("key2"));
+      assertThat(table.location()).isEqualTo(location);
+      assertThat(table.schema().columns()).hasSize(2);
+      assertThat(table.spec().fields()).hasSize(1);
+      assertThat(table.properties()).containsEntry("key1", "value1");
+      assertThat(table.properties()).containsEntry("key2", "value2");
     } finally {
       catalog.dropTable(tableIdent);
     }
@@ -132,18 +132,18 @@ public class TestHiveCatalog extends HiveMetastoreTest {
     Schema schema = getTestSchema();
     PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 
16).build();
     TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
-    String location = temp.newFolder("tbl").toString();
+    String location = temp.resolve("tbl").toString();
     ImmutableMap<String, String> properties = ImmutableMap.of("key1", 
"value1", "key2", "value2");
     Catalog cachingCatalog = CachingCatalog.wrap(catalog);
 
     try {
       Table table = cachingCatalog.createTable(tableIdent, schema, spec, 
location, properties);
 
-      Assert.assertEquals(location, table.location());
-      Assert.assertEquals(2, table.schema().columns().size());
-      Assert.assertEquals(1, table.spec().fields().size());
-      Assert.assertEquals("value1", table.properties().get("key1"));
-      Assert.assertEquals("value2", table.properties().get("key2"));
+      assertThat(table.location()).isEqualTo(location);
+      assertThat(table.schema().columns()).hasSize(2);
+      assertThat(table.spec().fields()).hasSize(1);
+      assertThat(table.properties()).containsEntry("key1", "value1");
+      assertThat(table.properties()).containsEntry("key2", "value2");
     } finally {
       cachingCatalog.dropTable(tableIdent);
     }
@@ -151,20 +151,22 @@ public class TestHiveCatalog extends HiveMetastoreTest {
 
   @Test
   public void testInitialize() {
-    Assertions.assertDoesNotThrow(
-        () -> {
-          HiveCatalog catalog = new HiveCatalog();
-          catalog.initialize("hive", Maps.newHashMap());
-        });
+    assertThatNoException()
+        .isThrownBy(
+            () -> {
+              HiveCatalog catalog = new HiveCatalog();
+              catalog.initialize("hive", Maps.newHashMap());
+            });
   }
 
   @Test
   public void testToStringWithoutSetConf() {
-    Assertions.assertDoesNotThrow(
-        () -> {
-          HiveCatalog catalog = new HiveCatalog();
-          catalog.toString();
-        });
+    assertThatNoException()
+        .isThrownBy(
+            () -> {
+              HiveCatalog catalog = new HiveCatalog();
+              catalog.toString();
+            });
   }
 
   @Test
@@ -175,16 +177,16 @@ public class TestHiveCatalog extends HiveMetastoreTest {
     HiveCatalog catalog = new HiveCatalog();
     catalog.initialize("hive", properties);
 
-    Assert.assertEquals(catalog.getConf().get("hive.metastore.uris"), 
"thrift://examplehost:9083");
-    Assert.assertEquals(
-        catalog.getConf().get("hive.metastore.warehouse.dir"), 
"/user/hive/testwarehouse");
+    
assertThat(catalog.getConf().get("hive.metastore.uris")).isEqualTo("thrift://examplehost:9083");
+    assertThat(catalog.getConf().get("hive.metastore.warehouse.dir"))
+        .isEqualTo("/user/hive/testwarehouse");
   }
 
   @Test
   public void testCreateTableTxnBuilder() throws Exception {
     Schema schema = getTestSchema();
     TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
-    String location = temp.newFolder("tbl").toString();
+    String location = temp.resolve("tbl").toString();
 
     try {
       Transaction txn =
@@ -192,9 +194,9 @@ public class TestHiveCatalog extends HiveMetastoreTest {
       txn.commitTransaction();
       Table table = catalog.loadTable(tableIdent);
 
-      Assert.assertEquals(location, table.location());
-      Assert.assertEquals(2, table.schema().columns().size());
-      Assert.assertTrue(table.spec().isUnpartitioned());
+      assertThat(table.location()).isEqualTo(location);
+      assertThat(table.schema().columns()).hasSize(2);
+      assertThat(table.spec().isUnpartitioned()).isTrue();
     } finally {
       catalog.dropTable(tableIdent);
     }
@@ -205,7 +207,7 @@ public class TestHiveCatalog extends HiveMetastoreTest {
     Schema schema = getTestSchema();
     PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 
16).build();
     TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
-    String location = temp.newFolder("tbl").toString();
+    String location = temp.resolve("tbl").toString();
 
     try {
       Transaction createTxn =
@@ -218,9 +220,9 @@ public class TestHiveCatalog extends HiveMetastoreTest {
       createTxn.commitTransaction();
 
       Table table = catalog.loadTable(tableIdent);
-      Assert.assertEquals(1, table.spec().fields().size());
+      assertThat(table.spec().fields()).hasSize(1);
 
-      String newLocation = temp.newFolder("tbl-2").toString();
+      String newLocation = temp.resolve("tbl-2").toString();
 
       Transaction replaceTxn =
           catalog
@@ -231,17 +233,19 @@ public class TestHiveCatalog extends HiveMetastoreTest {
       replaceTxn.commitTransaction();
 
       table = catalog.loadTable(tableIdent);
-      Assert.assertEquals(newLocation, table.location());
-      Assert.assertNull(table.currentSnapshot());
+      assertThat(table.location()).isEqualTo(newLocation);
+      assertThat(table.currentSnapshot()).isNull();
       PartitionSpec v1Expected =
           PartitionSpec.builderFor(table.schema())
               .alwaysNull("data", "data_bucket")
               .withSpecId(1)
               .build();
-      Assert.assertEquals("Table should have a spec with one void field", 
v1Expected, table.spec());
+      assertThat(table.spec())
+          .as("Table should have a spec with one void field")
+          .isEqualTo(v1Expected);
 
-      Assert.assertEquals("value1", table.properties().get("key1"));
-      Assert.assertEquals("value2", table.properties().get("key2"));
+      assertThat(table.properties()).containsEntry("key1", "value1");
+      assertThat(table.properties()).containsEntry("key2", "value2");
     } finally {
       catalog.dropTable(tableIdent);
     }
@@ -267,13 +271,13 @@ public class TestHiveCatalog extends HiveMetastoreTest {
     Schema schema = getTestSchema();
     PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 
16).build();
     TableIdentifier tableIdent = TableIdentifier.of(db, tbl);
-    String location = temp.newFolder(tbl).toString();
+    String location = temp.resolve(tbl).toString();
     try {
       Table table = catalog.createTable(tableIdent, schema, spec, location, 
properties);
       org.apache.hadoop.hive.metastore.api.Table hmsTable = 
metastoreClient.getTable(db, tbl);
-      Assert.assertEquals(owner, hmsTable.getOwner());
+      assertThat(hmsTable.getOwner()).isEqualTo(owner);
       Map<String, String> hmsTableParams = hmsTable.getParameters();
-      
Assert.assertFalse(hmsTableParams.containsKey(HiveCatalog.HMS_TABLE_OWNER));
+      
assertThat(hmsTableParams).doesNotContainKey(HiveCatalog.HMS_TABLE_OWNER);
     } finally {
       catalog.dropTable(tableIdent);
     }
@@ -287,12 +291,12 @@ public class TestHiveCatalog extends HiveMetastoreTest {
 
     try {
       Table table = catalog.createTable(tableIdent, schema, spec);
-      Assert.assertEquals("Order ID must match", 0, 
table.sortOrder().orderId());
-      Assert.assertTrue("Order must unsorted", table.sortOrder().isUnsorted());
+      assertThat(table.sortOrder().orderId()).as("Order ID must 
match").isEqualTo(0);
+      assertThat(table.sortOrder().isUnsorted()).as("Order must 
unsorted").isTrue();
 
-      Assert.assertFalse(
-          "Must not have default sort order in catalog",
-          hmsTableParameters().containsKey(DEFAULT_SORT_ORDER));
+      assertThat(hmsTableParameters())
+          .as("Must not have default sort order in catalog")
+          .doesNotContainKey(DEFAULT_SORT_ORDER);
     } finally {
       catalog.dropTable(tableIdent);
     }
@@ -313,16 +317,19 @@ public class TestHiveCatalog extends HiveMetastoreTest {
               .withSortOrder(order)
               .create();
       SortOrder sortOrder = table.sortOrder();
-      Assert.assertEquals("Order ID must match", 1, sortOrder.orderId());
-      Assert.assertEquals("Order must have 1 field", 1, 
sortOrder.fields().size());
-      Assert.assertEquals("Direction must match ", ASC, 
sortOrder.fields().get(0).direction());
-      Assert.assertEquals(
-          "Null order must match ", NULLS_FIRST, 
sortOrder.fields().get(0).nullOrder());
+      assertThat(sortOrder.orderId()).as("Order ID must match").isEqualTo(1);
+      assertThat(sortOrder.fields()).as("Order must have 1 field").hasSize(1);
+      assertThat(sortOrder.fields().get(0).direction()).as("Direction must 
match ").isEqualTo(ASC);
+      assertThat(sortOrder.fields().get(0).nullOrder())
+          .as("Null order must match ")
+          .isEqualTo(NULLS_FIRST);
       Transform<?, ?> transform = Transforms.identity(Types.IntegerType.get());
-      Assert.assertEquals("Transform must match", transform, 
sortOrder.fields().get(0).transform());
+      assertThat(sortOrder.fields().get(0).transform())
+          .as("Transform must match")
+          .isEqualTo(transform);
 
-      Assert.assertEquals(
-          SortOrderParser.toJson(table.sortOrder()), 
hmsTableParameters().get(DEFAULT_SORT_ORDER));
+      assertThat(hmsTableParameters())
+          .containsEntry(DEFAULT_SORT_ORDER, 
SortOrderParser.toJson(table.sortOrder()));
     } finally {
       catalog.dropTable(tableIdent);
     }
@@ -334,18 +341,17 @@ public class TestHiveCatalog extends HiveMetastoreTest {
     catalog.createNamespace(namespace1, meta);
     Database database1 = metastoreClient.getDatabase(namespace1.toString());
 
-    Assert.assertTrue(database1.getParameters().get("owner").equals("apache"));
-    
Assert.assertTrue(database1.getParameters().get("group").equals("iceberg"));
+    assertThat(database1.getParameters()).containsEntry("owner", "apache");
+    assertThat(database1.getParameters()).containsEntry("group", "iceberg");
 
-    Assert.assertEquals(
-        "There no same location for db and namespace",
-        database1.getLocationUri(),
-        defaultUri(namespace1));
+    assertThat(defaultUri(namespace1))
+        .as("There no same location for db and namespace")
+        .isEqualTo(database1.getLocationUri());
 
     assertThatThrownBy(() -> catalog.createNamespace(namespace1))
         .isInstanceOf(AlreadyExistsException.class)
         .hasMessage("Namespace '" + namespace1 + "' already exists!");
-    String hiveLocalDir = temp.newFolder().toURI().toString();
+    String hiveLocalDir = temp.toFile().toURI().toString();
     // remove the trailing slash of the URI
     hiveLocalDir = hiveLocalDir.substring(0, hiveLocalDir.length() - 1);
     ImmutableMap newMeta =
@@ -357,8 +363,9 @@ public class TestHiveCatalog extends HiveMetastoreTest {
 
     catalog.createNamespace(namespace2, newMeta);
     Database database2 = metastoreClient.getDatabase(namespace2.toString());
-    Assert.assertEquals(
-        "There no same location for db and namespace", 
database2.getLocationUri(), hiveLocalDir);
+    assertThat(hiveLocalDir)
+        .as("There no same location for db and namespace")
+        .isEqualTo(database2.getLocationUri());
   }
 
   @Test
@@ -437,8 +444,8 @@ public class TestHiveCatalog extends HiveMetastoreTest {
     catalog.createNamespace(namespace, prop);
     Database db = metastoreClient.getDatabase(namespace.toString());
 
-    Assert.assertEquals(expectedOwner, db.getOwnerName());
-    Assert.assertEquals(expectedOwnerType, db.getOwnerType());
+    assertThat(db.getOwnerName()).isEqualTo(expectedOwner);
+    assertThat(db.getOwnerType()).isEqualTo(expectedOwnerType);
   }
 
   @Test
@@ -447,13 +454,13 @@ public class TestHiveCatalog extends HiveMetastoreTest {
     Namespace namespace1 = Namespace.of("dbname1");
     catalog.createNamespace(namespace1, meta);
     namespaces = catalog.listNamespaces(namespace1);
-    Assert.assertTrue("Hive db not hive the namespace 'dbname1'", 
namespaces.isEmpty());
+    assertThat(namespaces).as("Hive db must not contain nested namespace 'dbname1'").isEmpty();
 
     Namespace namespace2 = Namespace.of("dbname2");
     catalog.createNamespace(namespace2, meta);
     namespaces = catalog.listNamespaces();
 
-    Assert.assertTrue("Hive db not hive the namespace 'dbname2'", 
namespaces.contains(namespace2));
+    assertThat(namespaces).as("Hive db must contain namespace 'dbname2'").contains(namespace2);
   }
 
   @Test
@@ -463,12 +470,11 @@ public class TestHiveCatalog extends HiveMetastoreTest {
     catalog.createNamespace(namespace, meta);
 
     Map<String, String> nameMata = catalog.loadNamespaceMetadata(namespace);
-    Assert.assertTrue(nameMata.get("owner").equals("apache"));
-    Assert.assertTrue(nameMata.get("group").equals("iceberg"));
-    Assert.assertEquals(
-        "There no same location for db and namespace",
-        nameMata.get("location"),
-        catalog.convertToDatabase(namespace, meta).getLocationUri());
+    assertThat(nameMata).containsEntry("owner", "apache");
+    assertThat(nameMata).containsEntry("group", "iceberg");
+    assertThat(catalog.convertToDatabase(namespace, meta).getLocationUri())
+        .as("Database and namespace should share the same location")
+        .isEqualTo(nameMata.get("location"));
   }
 
   @Test
@@ -477,10 +483,10 @@ public class TestHiveCatalog extends HiveMetastoreTest {
 
     catalog.createNamespace(namespace, meta);
 
-    Assert.assertTrue("Should true to namespace exist", 
catalog.namespaceExists(namespace));
-    Assert.assertTrue(
-        "Should false to namespace doesn't exist",
-        !catalog.namespaceExists(Namespace.of("db2", "db2", "ns2")));
+    assertThat(catalog.namespaceExists(namespace)).as("Namespace should exist").isTrue();
+    assertThat(catalog.namespaceExists(Namespace.of("db2", "db2", "ns2")))
+        .as("Namespace should not exist")
+        .isFalse();
   }
 
   @Test
@@ -497,9 +503,9 @@ public class TestHiveCatalog extends HiveMetastoreTest {
             "comment", "iceberg test"));
 
     Database database = metastoreClient.getDatabase(namespace.level(0));
-    Assert.assertEquals(database.getParameters().get("owner"), "alter_apache");
-    Assert.assertEquals(database.getParameters().get("test"), "test");
-    Assert.assertEquals(database.getParameters().get("group"), "iceberg");
+    assertThat(database.getParameters()).containsEntry("owner", 
"alter_apache");
+    assertThat(database.getParameters()).containsEntry("test", "test");
+    assertThat(database.getParameters()).containsEntry("group", "iceberg");
 
     assertThatThrownBy(
             () -> catalog.setProperties(Namespace.of("db2", "db2", "ns2"), 
ImmutableMap.of()))
@@ -684,8 +690,8 @@ public class TestHiveCatalog extends HiveMetastoreTest {
     catalog.setProperties(Namespace.of(name), propToSet);
     Database database = metastoreClient.getDatabase(name);
 
-    Assert.assertEquals(expectedOwnerPostSet, database.getOwnerName());
-    Assert.assertEquals(expectedOwnerTypePostSet, database.getOwnerType());
+    assertThat(database.getOwnerName()).isEqualTo(expectedOwnerPostSet);
+    assertThat(database.getOwnerType()).isEqualTo(expectedOwnerTypePostSet);
   }
 
   @Test
@@ -698,8 +704,8 @@ public class TestHiveCatalog extends HiveMetastoreTest {
 
     Database database = metastoreClient.getDatabase(namespace.level(0));
 
-    Assert.assertEquals(database.getParameters().get("owner"), null);
-    Assert.assertEquals(database.getParameters().get("group"), "iceberg");
+    assertThat(database.getParameters()).doesNotContainKey("owner");
+    assertThat(database.getParameters()).containsEntry("group", "iceberg");
 
     assertThatThrownBy(
             () ->
@@ -830,8 +836,8 @@ public class TestHiveCatalog extends HiveMetastoreTest {
 
     Database database = metastoreClient.getDatabase(name);
 
-    Assert.assertEquals(expectedOwnerPostRemove, database.getOwnerName());
-    Assert.assertEquals(expectedOwnerTypePostRemove, database.getOwnerType());
+    assertThat(database.getOwnerName()).isEqualTo(expectedOwnerPostRemove);
+    assertThat(database.getOwnerType()).isEqualTo(expectedOwnerTypePostRemove);
   }
 
   @Test
@@ -843,18 +849,19 @@ public class TestHiveCatalog extends HiveMetastoreTest {
     catalog.createNamespace(namespace, meta);
     catalog.createTable(identifier, schema);
     Map<String, String> nameMata = catalog.loadNamespaceMetadata(namespace);
-    Assert.assertTrue(nameMata.get("owner").equals("apache"));
-    Assert.assertTrue(nameMata.get("group").equals("iceberg"));
+    assertThat(nameMata).containsEntry("owner", "apache");
+    assertThat(nameMata).containsEntry("group", "iceberg");
 
     assertThatThrownBy(() -> catalog.dropNamespace(namespace))
         .isInstanceOf(NamespaceNotEmptyException.class)
         .hasMessage("Namespace dbname_drop is not empty. One or more tables 
exist.");
-    Assert.assertTrue(catalog.dropTable(identifier, true));
-    Assert.assertTrue(
-        "Should fail to drop namespace if it is not empty", 
catalog.dropNamespace(namespace));
-    Assert.assertFalse(
-        "Should fail to drop when namespace doesn't exist",
-        catalog.dropNamespace(Namespace.of("db.ns1")));
+    assertThat(catalog.dropTable(identifier, true)).isTrue();
+    assertThat(catalog.dropNamespace(namespace))
+        .as("Should drop namespace once it is empty")
+        .isTrue();
+    assertThat(catalog.dropNamespace(Namespace.of("db.ns1")))
+        .as("Should fail to drop when namespace doesn't exist")
+        .isFalse();
     assertThatThrownBy(() -> catalog.loadNamespaceMetadata(namespace))
         .isInstanceOf(NoSuchNamespaceException.class)
         .hasMessage("Namespace does not exist: dbname_drop");
@@ -868,7 +875,7 @@ public class TestHiveCatalog extends HiveMetastoreTest {
     String metadataFileLocation = 
catalog.newTableOps(identifier).current().metadataFileLocation();
     TableOperations ops = catalog.newTableOps(identifier);
     ops.io().deleteFile(metadataFileLocation);
-    Assert.assertTrue(catalog.dropTable(identifier));
+    assertThat(catalog.dropTable(identifier)).isTrue();
     assertThatThrownBy(() -> catalog.loadTable(identifier))
         .isInstanceOf(NoSuchTableException.class)
         .hasMessageContaining("Table does not exist:");
@@ -884,11 +891,13 @@ public class TestHiveCatalog extends HiveMetastoreTest {
       catalog.buildTable(tableIdent, schema).withPartitionSpec(spec).create();
 
       Table table = catalog.loadTable(tableIdent);
-      Assert.assertEquals("Name must match", "hive.hivedb.tbl", table.name());
+      assertThat(table.name()).as("Name must 
match").isEqualTo("hive.hivedb.tbl");
 
       TableIdentifier snapshotsTableIdent = TableIdentifier.of(DB_NAME, "tbl", 
"snapshots");
       Table snapshotsTable = catalog.loadTable(snapshotsTableIdent);
-      Assert.assertEquals("Name must match", "hive.hivedb.tbl.snapshots", 
snapshotsTable.name());
+      assertThat(snapshotsTable.name())
+          .as("Name must match")
+          .isEqualTo("hive.hivedb.tbl.snapshots");
     } finally {
       catalog.dropTable(tableIdent);
     }
@@ -905,12 +914,12 @@ public class TestHiveCatalog extends HiveMetastoreTest {
   public void testUUIDinTableProperties() throws Exception {
     Schema schema = getTestSchema();
     TableIdentifier tableIdentifier = TableIdentifier.of(DB_NAME, "tbl");
-    String location = temp.newFolder("tbl").toString();
+    String location = temp.resolve("tbl").toString();
 
     try {
       catalog.buildTable(tableIdentifier, 
schema).withLocation(location).create();
 
-      Assert.assertNotNull(hmsTableParameters().get(TableProperties.UUID));
+      assertThat(hmsTableParameters()).containsKey(TableProperties.UUID);
     } finally {
       catalog.dropTable(tableIdentifier);
     }
@@ -920,17 +929,18 @@ public class TestHiveCatalog extends HiveMetastoreTest {
   public void testSnapshotStatsTableProperties() throws Exception {
     Schema schema = getTestSchema();
     TableIdentifier tableIdentifier = TableIdentifier.of(DB_NAME, "tbl");
-    String location = temp.newFolder("tbl").toString();
+    String location = temp.resolve("tbl").toString();
 
     try {
       catalog.buildTable(tableIdentifier, 
schema).withLocation(location).create();
 
       // check whether parameters are in expected state
       Map<String, String> parameters = hmsTableParameters();
-      Assert.assertEquals("0", parameters.get(TableProperties.SNAPSHOT_COUNT));
-      Assert.assertNull(parameters.get(CURRENT_SNAPSHOT_SUMMARY));
-      Assert.assertNull(parameters.get(CURRENT_SNAPSHOT_ID));
-      Assert.assertNull(parameters.get(CURRENT_SNAPSHOT_TIMESTAMP));
+      assertThat(parameters).containsEntry(SNAPSHOT_COUNT, "0");
+      assertThat(parameters)
+          .doesNotContainKey(CURRENT_SNAPSHOT_SUMMARY)
+          .doesNotContainKey(CURRENT_SNAPSHOT_ID)
+          .doesNotContainKey(CURRENT_SNAPSHOT_TIMESTAMP);
 
       // create a snapshot
       Table icebergTable = catalog.loadTable(tableIdentifier);
@@ -945,16 +955,16 @@ public class TestHiveCatalog extends HiveMetastoreTest {
 
       // check whether parameters are in expected state
       parameters = hmsTableParameters();
-      Assert.assertEquals("1", parameters.get(TableProperties.SNAPSHOT_COUNT));
+      assertThat(parameters).containsEntry(SNAPSHOT_COUNT, "1");
       String summary =
           
JsonUtil.mapper().writeValueAsString(icebergTable.currentSnapshot().summary());
-      Assert.assertEquals(summary, parameters.get(CURRENT_SNAPSHOT_SUMMARY));
+      assertThat(parameters).containsEntry(CURRENT_SNAPSHOT_SUMMARY, summary);
       long snapshotId = icebergTable.currentSnapshot().snapshotId();
-      Assert.assertEquals(String.valueOf(snapshotId), 
parameters.get(CURRENT_SNAPSHOT_ID));
-      Assert.assertEquals(
-          String.valueOf(icebergTable.currentSnapshot().timestampMillis()),
-          parameters.get(CURRENT_SNAPSHOT_TIMESTAMP));
-
+      assertThat(parameters).containsEntry(CURRENT_SNAPSHOT_ID, 
String.valueOf(snapshotId));
+      assertThat(parameters)
+          .containsEntry(
+              CURRENT_SNAPSHOT_TIMESTAMP,
+              
String.valueOf(icebergTable.currentSnapshot().timestampMillis()));
     } finally {
       catalog.dropTable(tableIdentifier);
     }
@@ -974,10 +984,10 @@ public class TestHiveCatalog extends HiveMetastoreTest {
     for (int i = 0; i < 100; i++) {
       summary.put(String.valueOf(i), "value");
     }
-    Assert.assertTrue(JsonUtil.mapper().writeValueAsString(summary).length() < 
4000);
+    
assertThat(JsonUtil.mapper().writeValueAsString(summary).length()).isLessThan(4000);
     Map<String, String> parameters = Maps.newHashMap();
     ops.setSnapshotSummary(parameters, snapshot);
-    Assert.assertEquals("The snapshot summary must be in parameters", 1, 
parameters.size());
+    assertThat(parameters).as("The snapshot summary must be in 
parameters").hasSize(1);
 
     // create a snapshot summary whose json string size exceeds the limit
     for (int i = 0; i < 1000; i++) {
@@ -985,13 +995,12 @@ public class TestHiveCatalog extends HiveMetastoreTest {
     }
     long summarySize = JsonUtil.mapper().writeValueAsString(summary).length();
     // the limit has been updated to 4000 instead of the default value(32672)
-    Assert.assertTrue(summarySize > 4000 && summarySize < 32672);
+    assertThat(summarySize).isGreaterThan(4000).isLessThan(32672);
     parameters.remove(CURRENT_SNAPSHOT_SUMMARY);
     ops.setSnapshotSummary(parameters, snapshot);
-    Assert.assertEquals(
-        "The snapshot summary must not be in parameters due to the size limit",
-        0,
-        parameters.size());
+    assertThat(parameters)
+        .as("The snapshot summary must not be in parameters due to the size 
limit")
+        .isEmpty();
   }
 
   @Test
@@ -1010,18 +1019,19 @@ public class TestHiveCatalog extends HiveMetastoreTest {
     parameters.put(DEFAULT_SORT_ORDER, "sortOrder");
 
     ops.setSnapshotStats(metadata, parameters);
-    Assert.assertNull(parameters.get(CURRENT_SNAPSHOT_SUMMARY));
-    Assert.assertNull(parameters.get(CURRENT_SNAPSHOT_ID));
-    Assert.assertNull(parameters.get(CURRENT_SNAPSHOT_TIMESTAMP));
+    assertThat(parameters)
+        .doesNotContainKey(CURRENT_SNAPSHOT_SUMMARY)
+        .doesNotContainKey(CURRENT_SNAPSHOT_ID)
+        .doesNotContainKey(CURRENT_SNAPSHOT_TIMESTAMP);
 
     ops.setSchema(metadata, parameters);
-    Assert.assertNull(parameters.get(CURRENT_SCHEMA));
+    assertThat(parameters).doesNotContainKey(CURRENT_SCHEMA);
 
     ops.setPartitionSpec(metadata, parameters);
-    Assert.assertNull(parameters.get(DEFAULT_PARTITION_SPEC));
+    assertThat(parameters).doesNotContainKey(DEFAULT_PARTITION_SPEC);
 
     ops.setSortOrder(metadata, parameters);
-    Assert.assertNull(parameters.get(DEFAULT_SORT_ORDER));
+    assertThat(parameters).doesNotContainKey(DEFAULT_SORT_ORDER);
   }
 
   @Test
@@ -1031,14 +1041,14 @@ public class TestHiveCatalog extends HiveMetastoreTest {
 
     try {
       Table table = catalog.buildTable(tableIdent, schema).create();
-      Assert.assertFalse(
-          "Must not have default partition spec",
-          
hmsTableParameters().containsKey(TableProperties.DEFAULT_PARTITION_SPEC));
+      assertThat(hmsTableParameters())
+          .as("Must not have default partition spec")
+          .doesNotContainKey(TableProperties.DEFAULT_PARTITION_SPEC);
 
       table.updateSpec().addField(bucket("data", 16)).commit();
-      Assert.assertEquals(
-          PartitionSpecParser.toJson(table.spec()),
-          hmsTableParameters().get(TableProperties.DEFAULT_PARTITION_SPEC));
+      assertThat(hmsTableParameters())
+          .containsEntry(
+              TableProperties.DEFAULT_PARTITION_SPEC, 
PartitionSpecParser.toJson(table.spec()));
     } finally {
       catalog.dropTable(tableIdent);
     }
@@ -1052,8 +1062,8 @@ public class TestHiveCatalog extends HiveMetastoreTest {
     try {
       Table table = catalog.buildTable(tableIdent, schema).create();
 
-      Assert.assertEquals(
-          SchemaParser.toJson(table.schema()), 
hmsTableParameters().get(CURRENT_SCHEMA));
+      assertThat(hmsTableParameters())
+          .containsEntry(CURRENT_SCHEMA, SchemaParser.toJson(table.schema()));
 
       // add many new fields to make the schema json string exceed the limit
       UpdateSchema updateSchema = table.updateSchema();
@@ -1062,8 +1072,8 @@ public class TestHiveCatalog extends HiveMetastoreTest {
       }
       updateSchema.commit();
 
-      Assert.assertTrue(SchemaParser.toJson(table.schema()).length() > 32672);
-      Assert.assertNull(hmsTableParameters().get(CURRENT_SCHEMA));
+      
assertThat(SchemaParser.toJson(table.schema()).length()).isGreaterThan(32672);
+      assertThat(hmsTableParameters()).doesNotContainKey(CURRENT_SCHEMA);
     } finally {
       catalog.dropTable(tableIdent);
     }
@@ -1081,10 +1091,9 @@ public class TestHiveCatalog extends HiveMetastoreTest {
 
     catalogWithSlash.initialize(
         "hive_catalog", ImmutableMap.of(CatalogProperties.WAREHOUSE_LOCATION, 
wareHousePath + "/"));
-    Assert.assertEquals(
-        "Should have trailing slash stripped",
-        wareHousePath,
-        
catalogWithSlash.getConf().get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname));
+    
assertThat(catalogWithSlash.getConf().get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname))
+        .as("Should have trailing slash stripped")
+        .isEqualTo(wareHousePath);
   }
 
   @Test
@@ -1115,28 +1124,25 @@ public class TestHiveCatalog extends HiveMetastoreTest {
               .withProperty("key5", "table-key5")
               .create();
 
-      Assert.assertEquals(
-          "Table defaults set for the catalog must be added to the table 
properties.",
-          "catalog-default-key1",
-          table.properties().get("key1"));
-      Assert.assertEquals(
-          "Table property must override table default properties set at 
catalog level.",
-          "table-key2",
-          table.properties().get("key2"));
-      Assert.assertEquals(
-          "Table property override set at catalog level must override table 
default"
-              + " properties set at catalog level and table property 
specified.",
-          "catalog-override-key3",
-          table.properties().get("key3"));
-      Assert.assertEquals(
-          "Table override not in table props or defaults should be added to 
table properties",
-          "catalog-override-key4",
-          table.properties().get("key4"));
-      Assert.assertEquals(
-          "Table properties without any catalog level default or override 
should be added to table"
-              + " properties.",
-          "table-key5",
-          table.properties().get("key5"));
+      assertThat(table.properties())
+          .as("Table defaults set for the catalog must be added to the table 
properties.")
+          .containsEntry("key1", "catalog-default-key1");
+      assertThat(table.properties())
+          .as("Table property must override table default properties set at 
catalog level.")
+          .containsEntry("key2", "table-key2");
+      assertThat(table.properties())
+          .as(
+              "Table property override set at catalog level must override 
table default"
+                  + " properties set at catalog level and table property 
specified.")
+          .containsEntry("key3", "catalog-override-key3");
+      assertThat(table.properties())
+          .as("Table override not in table props or defaults should be added 
to table properties")
+          .containsEntry("key4", "catalog-override-key4");
+      assertThat(table.properties())
+          .as(
+              "Table properties without any catalog level default or override 
should be added to table"
+                  + " properties.")
+          .containsEntry("key5", "table-key5");
     } finally {
       hiveCatalog.dropTable(tableIdent);
     }
@@ -1153,7 +1159,7 @@ public class TestHiveCatalog extends HiveMetastoreTest {
 
     Database database = catalog.convertToDatabase(Namespace.of("database"), 
ImmutableMap.of());
 
-    Assert.assertEquals("s3://bucket/database.db", database.getLocationUri());
+    assertThat(database.getLocationUri()).isEqualTo("s3://bucket/database.db");
   }
 
   @Test
diff --git 
a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveClientPool.java 
b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveClientPool.java
index 5463865186..5a565d0e98 100644
--- 
a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveClientPool.java
+++ 
b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveClientPool.java
@@ -18,6 +18,9 @@
  */
 package org.apache.iceberg.hive;
 
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.InputStream;
@@ -33,11 +36,9 @@ import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.iceberg.relocated.com.google.common.collect.Lists;
 import org.apache.thrift.transport.TTransportException;
-import org.assertj.core.api.Assertions;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 public class TestHiveClientPool {
@@ -54,13 +55,13 @@ public class TestHiveClientPool {
 
   HiveClientPool clients;
 
-  @Before
+  @BeforeEach
   public void before() {
     HiveClientPool clientPool = new HiveClientPool(2, new Configuration());
     clients = Mockito.spy(clientPool);
   }
 
-  @After
+  @AfterEach
   public void after() {
     clients.close();
     clients = null;
@@ -74,16 +75,14 @@ public class TestHiveClientPool {
     HiveClientPool clientPool = new HiveClientPool(10, conf);
     HiveConf clientConf = clientPool.hiveConf();
 
-    Assert.assertEquals(
-        conf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname),
-        clientConf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname));
-    Assert.assertEquals(10, clientPool.poolSize());
+    assertThat(clientConf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname))
+        .isEqualTo(conf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname));
+    assertThat(clientPool.poolSize()).isEqualTo(10);
 
     // 'hive.metastore.sasl.enabled' should be 'true' as defined in xml
-    Assert.assertEquals(
-        conf.get(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname),
-        clientConf.get(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname));
-    
Assert.assertTrue(clientConf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL));
+    
assertThat(clientConf.get(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname))
+        
.isEqualTo(conf.get(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname));
+    
assertThat(clientConf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL)).isTrue();
   }
 
   private HiveConf createHiveConf() {
@@ -100,7 +99,7 @@ public class TestHiveClientPool {
   @Test
   public void testNewClientFailure() {
     Mockito.doThrow(new RuntimeException("Connection 
exception")).when(clients).newClient();
-    Assertions.assertThatThrownBy(() -> clients.run(Object::toString))
+    assertThatThrownBy(() -> clients.run(Object::toString))
         .isInstanceOf(RuntimeException.class)
         .hasMessage("Connection exception");
   }
@@ -112,7 +111,7 @@ public class TestHiveClientPool {
     Mockito.doThrow(new MetaException("Another meta exception"))
         .when(hmsClient)
         .getTables(Mockito.anyString(), Mockito.anyString());
-    Assertions.assertThatThrownBy(() -> clients.run(client -> 
client.getTables("default", "t")))
+    assertThatThrownBy(() -> clients.run(client -> client.getTables("default", 
"t")))
         .isInstanceOf(MetaException.class)
         .hasMessage("Another meta exception");
   }
@@ -132,7 +131,8 @@ public class TestHiveClientPool {
 
     Mockito.doReturn(databases).when(newClient).getAllDatabases();
     // The return is OK when the reconnect method is called.
-    Assert.assertEquals(databases, clients.run(client -> 
client.getAllDatabases(), true));
+    assertThat((List<String>) clients.run(client -> client.getAllDatabases(), 
true))
+        .isEqualTo(databases);
 
     // Verify that the method is called.
     Mockito.verify(clients).reconnect(hmsClient);
@@ -159,8 +159,8 @@ public class TestHiveClientPool {
             FunctionType.JAVA,
             null));
     Mockito.doReturn(response).when(newClient).getAllFunctions();
-
-    Assert.assertEquals(response, clients.run(client -> 
client.getAllFunctions(), true));
+    assertThat((GetAllFunctionsResponse) clients.run(client -> 
client.getAllFunctions(), true))
+        .isEqualTo(response);
 
     Mockito.verify(clients).reconnect(hmsClient);
     Mockito.verify(clients, Mockito.never()).reconnect(newClient);
diff --git 
a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCommitLocks.java 
b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCommitLocks.java
index 2de728d74b..9704b9f722 100644
--- 
a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCommitLocks.java
+++ 
b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCommitLocks.java
@@ -18,6 +18,8 @@
  */
 package org.apache.iceberg.hive;
 
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.atLeastOnce;
 import static org.mockito.Mockito.doAnswer;
@@ -58,12 +60,10 @@ import 
org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
 import org.apache.iceberg.relocated.com.google.common.collect.Lists;
 import org.apache.iceberg.types.Types;
 import org.apache.thrift.TException;
-import org.assertj.core.api.Assertions;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.AdditionalAnswers;
 import org.mockito.ArgumentCaptor;
 import org.mockito.invocation.InvocationOnMock;
@@ -85,7 +85,7 @@ public class TestHiveCommitLocks extends HiveTableBaseTest {
   LockResponse notAcquiredLockResponse = new LockResponse(dummyLockId, 
LockState.NOT_ACQUIRED);
   ShowLocksResponse emptyLocks = new ShowLocksResponse(Lists.newArrayList());
 
-  @BeforeClass
+  @BeforeAll
   public static void startMetastore() throws Exception {
     HiveMetastoreTest.startMetastore(
         ImmutableMap.of(HiveConf.ConfVars.HIVE_TXN_TIMEOUT.varname, "1s"));
@@ -114,12 +114,12 @@ public class TestHiveCommitLocks extends 
HiveTableBaseTest {
     spyCachedClientPool = spy(new CachedClientPool(hiveConf, 
Collections.emptyMap()));
     when(spyCachedClientPool.clientPool()).thenAnswer(invocation -> 
spyClientPool);
 
-    Assert.assertNotNull(spyClientRef.get());
+    assertThat(spyClientRef.get()).isNotNull();
 
     spyClient = spyClientRef.get();
   }
 
-  @Before
+  @BeforeEach
   public void before() throws Exception {
     Table table = catalog.loadTable(TABLE_IDENTIFIER);
     ops = (HiveTableOperations) ((HasTableOperations) table).operations();
@@ -134,7 +134,7 @@ public class TestHiveCommitLocks extends HiveTableBaseTest {
 
     metadataV2 = ops.current();
 
-    Assert.assertEquals(2, ops.current().schema().columns().size());
+    assertThat(ops.current().schema().columns()).hasSize(2);
 
     spyOps =
         spy(
@@ -148,7 +148,7 @@ public class TestHiveCommitLocks extends HiveTableBaseTest {
     reset(spyClient);
   }
 
-  @AfterClass
+  @AfterAll
   public static void cleanup() {
     try {
       spyClientPool.close();
@@ -165,7 +165,7 @@ public class TestHiveCommitLocks extends HiveTableBaseTest {
 
     spyOps.doCommit(metadataV2, metadataV1);
 
-    Assert.assertEquals(1, spyOps.current().schema().columns().size()); // 
should be 1 again
+    assertThat(spyOps.current().schema().columns()).hasSize(1); // should be 1 
again
   }
 
   @Test
@@ -184,7 +184,7 @@ public class TestHiveCommitLocks extends HiveTableBaseTest {
 
     spyOps.doCommit(metadataV2, metadataV1);
 
-    Assert.assertEquals(1, spyOps.current().schema().columns().size()); // 
should be 1 again
+    assertThat(spyOps.current().schema().columns()).hasSize(1); // should be 1 
again
   }
 
   @Test
@@ -202,7 +202,7 @@ public class TestHiveCommitLocks extends HiveTableBaseTest {
 
     spyOps.doCommit(metadataV2, metadataV1);
 
-    Assert.assertEquals(1, spyOps.current().schema().columns().size()); // 
should be 1 again
+    assertThat(spyOps.current().schema().columns()).hasSize(1); // should be 1 
again
   }
 
   @Test
@@ -226,7 +226,7 @@ public class TestHiveCommitLocks extends HiveTableBaseTest {
 
     spyOps.doCommit(metadataV2, metadataV1);
 
-    Assert.assertEquals(1, spyOps.current().schema().columns().size()); // 
should be 1 again
+    assertThat(spyOps.current().schema().columns()).hasSize(1); // should be 1 
again
   }
 
   @Test
@@ -280,7 +280,7 @@ public class TestHiveCommitLocks extends HiveTableBaseTest {
     doNothing().when(spyClient).unlock(eq(dummyLockId));
     doNothing().when(spyClient).heartbeat(eq(0L), eq(dummyLockId));
 
-    Assertions.assertThatThrownBy(() -> spyOps.doCommit(metadataV2, 
metadataV1))
+    assertThatThrownBy(() -> spyOps.doCommit(metadataV2, metadataV1))
         .isInstanceOf(RuntimeException.class)
         .hasMessage(
             "org.apache.iceberg.hive.LockException: "
@@ -304,7 +304,7 @@ public class TestHiveCommitLocks extends HiveTableBaseTest {
     doNothing().when(spyClient).unlock(eq(dummyLockId));
     doNothing().when(spyClient).heartbeat(eq(0L), eq(dummyLockId));
 
-    Assertions.assertThatThrownBy(() -> spyOps.doCommit(metadataV2, 
metadataV1))
+    assertThatThrownBy(() -> spyOps.doCommit(metadataV2, metadataV1))
         .isInstanceOf(RuntimeException.class)
         .hasMessage(
             "org.apache.iceberg.hive.LockException: "
@@ -328,7 +328,7 @@ public class TestHiveCommitLocks extends HiveTableBaseTest {
     doNothing().when(spyClient).unlock(eq(dummyLockId));
     doNothing().when(spyClient).heartbeat(eq(0L), eq(dummyLockId));
 
-    Assertions.assertThatThrownBy(() -> spyOps.doCommit(metadataV2, 
metadataV1))
+    assertThatThrownBy(() -> spyOps.doCommit(metadataV2, metadataV1))
         .isInstanceOf(RuntimeException.class)
         .hasMessage("Interrupted during commit");
 
@@ -363,7 +363,7 @@ public class TestHiveCommitLocks extends HiveTableBaseTest {
   public void testLockFailureAtFirstTime() throws TException {
     doReturn(notAcquiredLockResponse).when(spyClient).lock(any());
 
-    Assertions.assertThatThrownBy(() -> spyOps.doCommit(metadataV2, 
metadataV1))
+    assertThatThrownBy(() -> spyOps.doCommit(metadataV2, metadataV1))
         .isInstanceOf(CommitFailedException.class)
         .hasMessage(
             "org.apache.iceberg.hive.LockException: "
@@ -381,7 +381,7 @@ public class TestHiveCommitLocks extends HiveTableBaseTest {
         .when(spyClient)
         .checkLock(eq(dummyLockId));
 
-    Assertions.assertThatThrownBy(() -> spyOps.doCommit(metadataV2, 
metadataV1))
+    assertThatThrownBy(() -> spyOps.doCommit(metadataV2, metadataV1))
         .isInstanceOf(CommitFailedException.class)
         .hasMessage(
             "org.apache.iceberg.hive.LockException: "
@@ -393,7 +393,7 @@ public class TestHiveCommitLocks extends HiveTableBaseTest {
     doReturn(waitLockResponse).when(spyClient).lock(any());
     doReturn(waitLockResponse).when(spyClient).checkLock(eq(dummyLockId));
 
-    Assertions.assertThatThrownBy(() -> spyOps.doCommit(metadataV2, 
metadataV1))
+    assertThatThrownBy(() -> spyOps.doCommit(metadataV2, metadataV1))
         .isInstanceOf(CommitFailedException.class)
         .hasMessageStartingWith("org.apache.iceberg.hive.LockException")
         .hasMessageContaining("Timed out after")
@@ -408,7 +408,7 @@ public class TestHiveCommitLocks extends HiveTableBaseTest {
         .when(spyClient)
         .checkLock(eq(dummyLockId));
 
-    Assertions.assertThatThrownBy(() -> spyOps.doCommit(metadataV2, 
metadataV1))
+    assertThatThrownBy(() -> spyOps.doCommit(metadataV2, metadataV1))
         .isInstanceOf(RuntimeException.class)
         .hasMessage(
             "org.apache.iceberg.hive.LockException: Metastore operation failed 
for hivedb.tbl");
@@ -427,7 +427,7 @@ public class TestHiveCommitLocks extends HiveTableBaseTest {
         .when(spyClient)
         .checkLock(eq(dummyLockId));
 
-    Assertions.assertThatThrownBy(() -> spyOps.doCommit(metadataV2, 
metadataV1))
+    assertThatThrownBy(() -> spyOps.doCommit(metadataV2, metadataV1))
         .isInstanceOf(CommitFailedException.class)
         .hasMessage(
             "org.apache.iceberg.hive.LockException: "
@@ -488,7 +488,7 @@ public class TestHiveCommitLocks extends HiveTableBaseTest {
         .when(spyClient)
         .heartbeat(eq(0L), eq(dummyLockId));
 
-    Assertions.assertThatThrownBy(() -> spyOps.doCommit(metadataV2, 
metadataV1))
+    assertThatThrownBy(() -> spyOps.doCommit(metadataV2, metadataV1))
         .isInstanceOf(CommitFailedException.class)
         .hasMessage(
             "org.apache.iceberg.hive.LockException: "
@@ -527,9 +527,10 @@ public class TestHiveCommitLocks extends HiveTableBaseTest 
{
 
     // Make sure that the expected parameter context values are set
     Map<String, String> context = contextCaptor.getValue().getProperties();
-    Assert.assertEquals(3, context.size());
-    Assert.assertEquals(
-        context.get("expected_parameter_key"), 
HiveTableOperations.METADATA_LOCATION_PROP);
-    Assert.assertEquals(context.get("expected_parameter_value"), 
metadataV2.metadataFileLocation());
+    assertThat(context).hasSize(3);
+    assertThat(context.get("expected_parameter_key"))
+        .isEqualTo(HiveTableOperations.METADATA_LOCATION_PROP);
+    assertThat(context.get("expected_parameter_value"))
+        .isEqualTo(metadataV2.metadataFileLocation());
   }
 }
diff --git 
a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCommits.java 
b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCommits.java
index 6ae39d70ca..aaa6590421 100644
--- a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCommits.java
+++ b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCommits.java
@@ -18,6 +18,8 @@
  */
 package org.apache.iceberg.hive;
 
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.anyBoolean;
 import static org.mockito.Mockito.doAnswer;
@@ -39,9 +41,7 @@ import 
org.apache.iceberg.exceptions.CommitStateUnknownException;
 import org.apache.iceberg.exceptions.ValidationException;
 import org.apache.iceberg.types.Types;
 import org.apache.thrift.TException;
-import org.assertj.core.api.Assertions;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestHiveCommits extends HiveTableBaseTest {
 
@@ -58,7 +58,7 @@ public class TestHiveCommits extends HiveTableBaseTest {
 
     TableMetadata metadataV2 = ops.current();
 
-    Assert.assertEquals(2, ops.current().schema().columns().size());
+    assertThat(ops.current().schema().columns()).hasSize(2);
 
     HiveTableOperations spyOps = spy(ops);
 
@@ -83,7 +83,7 @@ public class TestHiveCommits extends HiveTableBaseTest {
     ops.refresh();
 
     // the commit must succeed
-    Assert.assertEquals(1, ops.current().schema().columns().size());
+    assertThat(ops.current().schema().columns()).hasSize(1);
   }
 
   /**
@@ -105,24 +105,24 @@ public class TestHiveCommits extends HiveTableBaseTest {
 
     TableMetadata metadataV2 = ops.current();
 
-    Assert.assertEquals(2, ops.current().schema().columns().size());
+    assertThat(ops.current().schema().columns()).hasSize(2);
 
     HiveTableOperations spyOps = spy(ops);
 
     failCommitAndThrowException(spyOps);
 
-    Assertions.assertThatThrownBy(() -> spyOps.commit(metadataV2, metadataV1))
+    assertThatThrownBy(() -> spyOps.commit(metadataV2, metadataV1))
         .isInstanceOf(CommitStateUnknownException.class)
         .hasMessageStartingWith("Datacenter on fire");
 
     ops.refresh();
-    Assert.assertEquals("Current metadata should not have changed", 
metadataV2, ops.current());
-    Assert.assertTrue("Current metadata should still exist", 
metadataFileExists(metadataV2));
-    Assert.assertEquals(
-        "New metadata files should still exist, new location not in history 
but"
-            + " the commit may still succeed",
-        3,
-        metadataFileCount(ops.current()));
+    assertThat(ops.current()).as("Current metadata should not have 
changed").isEqualTo(metadataV2);
+    assertThat(metadataFileExists(metadataV2)).as("Current metadata should 
still exist").isTrue();
+    assertThat(metadataFileCount(ops.current()))
+        .as(
+            "New metadata files should still exist, new location not in 
history but"
+                + " the commit may still succeed")
+        .isEqualTo(3);
   }
 
   /** Pretends we throw an error while persisting that actually does commit 
serverside */
@@ -139,7 +139,7 @@ public class TestHiveCommits extends HiveTableBaseTest {
 
     TableMetadata metadataV2 = ops.current();
 
-    Assert.assertEquals(2, ops.current().schema().columns().size());
+    assertThat(ops.current().schema().columns()).hasSize(2);
 
     HiveTableOperations spyOps = spy(ops);
 
@@ -151,13 +151,13 @@ public class TestHiveCommits extends HiveTableBaseTest {
     spyOps.commit(metadataV2, metadataV1);
 
     ops.refresh();
-    Assert.assertNotEquals("Current metadata should have changed", metadataV2, 
ops.current());
-    Assert.assertTrue(
-        "Current metadata file should still exist", 
metadataFileExists(ops.current()));
-    Assert.assertEquals(
-        "Commit should have been successful and new metadata file should be 
made",
-        3,
-        metadataFileCount(ops.current()));
+    assertThat(ops.current()).as("Current metadata should have 
changed").isNotEqualTo(metadataV2);
+    assertThat(metadataFileExists(ops.current()))
+        .as("Current metadata file should still exist")
+        .isTrue();
+    assertThat(metadataFileCount(ops.current()))
+        .as("Commit should have been successful and new metadata file should 
be made")
+        .isEqualTo(3);
   }
 
   /**
@@ -177,26 +177,26 @@ public class TestHiveCommits extends HiveTableBaseTest {
 
     TableMetadata metadataV2 = ops.current();
 
-    Assert.assertEquals(2, ops.current().schema().columns().size());
+    assertThat(ops.current().schema().columns()).hasSize(2);
 
     HiveTableOperations spyOps = spy(ops);
 
     failCommitAndThrowException(spyOps);
     breakFallbackCatalogCommitCheck(spyOps);
 
-    Assertions.assertThatThrownBy(() -> spyOps.commit(metadataV2, metadataV1))
+    assertThatThrownBy(() -> spyOps.commit(metadataV2, metadataV1))
         .isInstanceOf(CommitStateUnknownException.class)
         .hasMessageStartingWith("Datacenter on fire");
 
     ops.refresh();
 
-    Assert.assertEquals("Current metadata should not have changed", 
metadataV2, ops.current());
-    Assert.assertTrue(
-        "Current metadata file should still exist", 
metadataFileExists(ops.current()));
-    Assert.assertEquals(
-        "Client could not determine outcome so new metadata file should also 
exist",
-        3,
-        metadataFileCount(ops.current()));
+    assertThat(ops.current()).as("Current metadata should not have 
changed").isEqualTo(metadataV2);
+    assertThat(metadataFileExists(ops.current()))
+        .as("Current metadata file should still exist")
+        .isTrue();
+    assertThat(metadataFileCount(ops.current()))
+        .as("Client could not determine outcome so new metadata file should 
also exist")
+        .isEqualTo(3);
   }
 
   /**
@@ -216,22 +216,23 @@ public class TestHiveCommits extends HiveTableBaseTest {
 
     TableMetadata metadataV2 = ops.current();
 
-    Assert.assertEquals(2, ops.current().schema().columns().size());
+    assertThat(ops.current().schema().columns()).hasSize(2);
 
     HiveTableOperations spyOps = spy(ops);
 
     commitAndThrowException(ops, spyOps);
     breakFallbackCatalogCommitCheck(spyOps);
 
-    Assertions.assertThatThrownBy(() -> spyOps.commit(metadataV2, metadataV1))
+    assertThatThrownBy(() -> spyOps.commit(metadataV2, metadataV1))
         .isInstanceOf(CommitStateUnknownException.class)
         .hasMessageStartingWith("Datacenter on fire");
 
     ops.refresh();
 
-    Assert.assertFalse("Current metadata should have changed", 
ops.current().equals(metadataV2));
-    Assert.assertTrue(
-        "Current metadata file should still exist", 
metadataFileExists(ops.current()));
+    assertThat(ops.current()).as("Current metadata should have 
changed").isNotEqualTo(metadataV2);
+    assertThat(metadataFileExists(ops.current()))
+        .as("Current metadata file should still exist")
+        .isTrue();
   }
 
   /**
@@ -265,7 +266,7 @@ public class TestHiveCommits extends HiveTableBaseTest {
 
     TableMetadata metadataV2 = ops.current();
 
-    Assert.assertEquals(2, ops.current().schema().columns().size());
+    assertThat(ops.current().schema().columns()).hasSize(2);
 
     HiveTableOperations spyOps = spy(ops);
 
@@ -287,30 +288,29 @@ public class TestHiveCommits extends HiveTableBaseTest {
     spyOps.commit(metadataV2, metadataV1);
 
     ops.refresh();
-    Assert.assertNotEquals("Current metadata should have changed", metadataV2, 
ops.current());
-    Assert.assertTrue(
-        "Current metadata file should still exist", 
metadataFileExists(ops.current()));
-    Assert.assertEquals(
-        "The column addition from the concurrent commit should have been 
successful",
-        2,
-        ops.current().schema().columns().size());
+    assertThat(ops.current()).as("Current metadata should have 
changed").isNotEqualTo(metadataV2);
+    assertThat(metadataFileExists(ops.current()))
+        .as("Current metadata file should still exist")
+        .isTrue();
+    assertThat(ops.current().schema().columns())
+        .as("The column addition from the concurrent commit should have been 
successful")
+        .hasSize(2);
   }
 
   @Test
   public void testInvalidObjectException() {
     TableIdentifier badTi = TableIdentifier.of(DB_NAME, "`tbl`");
-    Assert.assertThrows(
-        String.format("Invalid table name for %s.%s", DB_NAME, "`tbl`"),
-        ValidationException.class,
-        () -> catalog.createTable(badTi, schema, 
PartitionSpec.unpartitioned()));
+    assertThatThrownBy(() -> catalog.createTable(badTi, schema, 
PartitionSpec.unpartitioned()))
+        .isInstanceOf(ValidationException.class)
+        .hasMessage(String.format("Invalid Hive object for %s.%s", DB_NAME, 
"`tbl`"));
   }
 
   @Test
   public void testAlreadyExistsException() {
-    Assert.assertThrows(
-        String.format("Table already exists: %s.%s", DB_NAME, TABLE_NAME),
-        AlreadyExistsException.class,
-        () -> catalog.createTable(TABLE_IDENTIFIER, schema, 
PartitionSpec.unpartitioned()));
+    assertThatThrownBy(
+            () -> catalog.createTable(TABLE_IDENTIFIER, schema, 
PartitionSpec.unpartitioned()))
+        .isInstanceOf(AlreadyExistsException.class)
+        .hasMessage(String.format("Table already exists: %s.%s", DB_NAME, 
TABLE_NAME));
   }
 
   /** Uses NoLock and pretends we throw an error because of a concurrent 
commit */
@@ -327,7 +327,7 @@ public class TestHiveCommits extends HiveTableBaseTest {
 
     TableMetadata metadataV2 = ops.current();
 
-    Assert.assertEquals(2, ops.current().schema().columns().size());
+    assertThat(ops.current().schema().columns()).hasSize(2);
 
     HiveTableOperations spyOps = spy(ops);
 
@@ -342,14 +342,16 @@ public class TestHiveCommits extends HiveTableBaseTest {
         .persistTable(any(), anyBoolean(), any());
 
     // Should throw a CommitFailedException so the commit could be retried
-    Assertions.assertThatThrownBy(() -> spyOps.commit(metadataV2, metadataV1))
+    assertThatThrownBy(() -> spyOps.commit(metadataV2, metadataV1))
         .isInstanceOf(CommitFailedException.class)
         .hasMessage("The table hivedb.tbl has been modified concurrently");
 
     ops.refresh();
-    Assert.assertEquals("Current metadata should not have changed", 
metadataV2, ops.current());
-    Assert.assertTrue("Current metadata should still exist", 
metadataFileExists(metadataV2));
-    Assert.assertEquals("New metadata files should not exist", 2, 
metadataFileCount(ops.current()));
+    assertThat(ops.current()).as("Current metadata should not have 
changed").isEqualTo(metadataV2);
+    assertThat(metadataFileExists(metadataV2)).as("Current metadata should 
still exist").isTrue();
+    assertThat(metadataFileCount(ops.current()))
+        .as("New metadata files should not exist")
+        .isEqualTo(2);
   }
 
   @Test
@@ -365,7 +367,7 @@ public class TestHiveCommits extends HiveTableBaseTest {
 
     TableMetadata metadataV2 = ops.current();
 
-    
Assertions.assertThat(ops.current().schema().columns().size()).isEqualTo(2);
+    assertThat(ops.current().schema().columns()).hasSize(2);
 
     HiveTableOperations spyOps = spy(ops);
 
@@ -381,16 +383,16 @@ public class TestHiveCommits extends HiveTableBaseTest {
         .when(spyOps)
         .persistTable(any(), anyBoolean(), any());
 
-    Assertions.assertThatThrownBy(() -> spyOps.commit(metadataV2, metadataV1))
+    assertThatThrownBy(() -> spyOps.commit(metadataV2, metadataV1))
         .hasMessageContaining("Failed to heartbeat for hive lock while")
         .isInstanceOf(CommitStateUnknownException.class);
 
     ops.refresh();
 
-    Assertions.assertThat(ops.current().location())
+    assertThat(ops.current().location())
         .as("Current metadata should have changed to metadata V1")
         .isEqualTo(metadataV1.location());
-    Assertions.assertThat(metadataFileExists(ops.current()))
+    assertThat(metadataFileExists(ops.current()))
         .as("Current metadata file should still exist")
         .isTrue();
   }
diff --git 
a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveMetastore.java 
b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveMetastore.java
index 08a70e95a1..e60181a33b 100644
--- 
a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveMetastore.java
+++ 
b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveMetastore.java
@@ -21,6 +21,7 @@ package org.apache.iceberg.hive;
 import static java.nio.file.Files.createTempDirectory;
 import static java.nio.file.attribute.PosixFilePermissions.asFileAttribute;
 import static java.nio.file.attribute.PosixFilePermissions.fromString;
+import static org.assertj.core.api.Assertions.assertThat;
 
 import java.io.File;
 import java.io.IOException;
@@ -52,7 +53,6 @@ import org.apache.thrift.server.TServer;
 import org.apache.thrift.server.TThreadPoolServer;
 import org.apache.thrift.transport.TServerSocket;
 import org.apache.thrift.transport.TTransportFactory;
-import org.junit.Assert;
 
 public class TestHiveMetastore {
 
@@ -112,7 +112,7 @@ public class TestHiveMetastore {
                     FileSystem fs = Util.getFs(localDirPath, new 
Configuration());
                     String errMsg = "Failed to delete " + localDirPath;
                     try {
-                      Assert.assertTrue(errMsg, fs.delete(localDirPath, true));
+                      assertThat(fs.delete(localDirPath, 
true)).as(errMsg).isTrue();
                     } catch (IOException e) {
                       throw new RuntimeException(errMsg, e);
                     }
diff --git 
a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveSchemaUtil.java 
b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveSchemaUtil.java
index c444df37b6..84d11d03a7 100644
--- 
a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveSchemaUtil.java
+++ 
b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveSchemaUtil.java
@@ -19,6 +19,8 @@
 package org.apache.iceberg.hive;
 
 import static org.apache.iceberg.types.Types.NestedField.optional;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
 
 import java.util.Arrays;
 import java.util.List;
@@ -32,9 +34,7 @@ import 
org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
 import org.apache.iceberg.relocated.com.google.common.collect.Lists;
 import org.apache.iceberg.types.Type;
 import org.apache.iceberg.types.Types;
-import org.assertj.core.api.Assertions;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestHiveSchemaUtil {
   private static final Schema SIMPLE_ICEBERG_SCHEMA =
@@ -113,8 +113,8 @@ public class TestHiveSchemaUtil {
 
   @Test
   public void testSimpleSchemaConvertToIcebergSchema() {
-    Assert.assertEquals(
-        SIMPLE_ICEBERG_SCHEMA.asStruct(), 
HiveSchemaUtil.convert(SIMPLE_HIVE_SCHEMA).asStruct());
+    assertThat(HiveSchemaUtil.convert(SIMPLE_HIVE_SCHEMA).asStruct())
+        .isEqualTo(SIMPLE_ICEBERG_SCHEMA.asStruct());
   }
 
   @Test
@@ -127,27 +127,26 @@ public class TestHiveSchemaUtil {
             .collect(Collectors.toList());
     List<String> comments =
         
SIMPLE_HIVE_SCHEMA.stream().map(FieldSchema::getComment).collect(Collectors.toList());
-    Assert.assertEquals(
-        SIMPLE_ICEBERG_SCHEMA.asStruct(),
-        HiveSchemaUtil.convert(names, types, comments).asStruct());
+    assertThat(HiveSchemaUtil.convert(names, types, comments).asStruct())
+        .isEqualTo(SIMPLE_ICEBERG_SCHEMA.asStruct());
   }
 
   @Test
   public void testComplexSchemaConvertToIcebergSchema() {
-    Assert.assertEquals(
-        COMPLEX_ICEBERG_SCHEMA.asStruct(), 
HiveSchemaUtil.convert(COMPLEX_HIVE_SCHEMA).asStruct());
+    assertThat(HiveSchemaUtil.convert(COMPLEX_HIVE_SCHEMA).asStruct())
+        .isEqualTo(COMPLEX_ICEBERG_SCHEMA.asStruct());
   }
 
   @Test
   public void testSchemaConvertToIcebergSchemaForEveryPrimitiveType() {
     Schema schemaWithEveryType = 
HiveSchemaUtil.convert(getSupportedFieldSchemas());
-    Assert.assertEquals(getSchemaWithSupportedTypes().asStruct(), 
schemaWithEveryType.asStruct());
+    
assertThat(schemaWithEveryType.asStruct()).isEqualTo(getSchemaWithSupportedTypes().asStruct());
   }
 
   @Test
   public void testNotSupportedTypes() {
     for (FieldSchema notSupportedField : getNotSupportedFieldSchemas()) {
-      Assertions.assertThatThrownBy(
+      assertThatThrownBy(
               () -> 
HiveSchemaUtil.convert(Lists.newArrayList(Arrays.asList(notSupportedField))))
           .isInstanceOf(IllegalArgumentException.class)
           .hasMessageStartingWith("Unsupported Hive type");
@@ -156,12 +155,12 @@ public class TestHiveSchemaUtil {
 
   @Test
   public void testSimpleSchemaConvertToHiveSchema() {
-    Assert.assertEquals(SIMPLE_HIVE_SCHEMA, 
HiveSchemaUtil.convert(SIMPLE_ICEBERG_SCHEMA));
+    
assertThat(HiveSchemaUtil.convert(SIMPLE_ICEBERG_SCHEMA)).isEqualTo(SIMPLE_HIVE_SCHEMA);
   }
 
   @Test
   public void testComplexSchemaConvertToHiveSchema() {
-    Assert.assertEquals(COMPLEX_HIVE_SCHEMA, 
HiveSchemaUtil.convert(COMPLEX_ICEBERG_SCHEMA));
+    
assertThat(HiveSchemaUtil.convert(COMPLEX_ICEBERG_SCHEMA)).isEqualTo(COMPLEX_HIVE_SCHEMA);
   }
 
   @Test
@@ -200,7 +199,7 @@ public class TestHiveSchemaUtil {
                 
TypeInfoUtils.getTypeInfoFromTypeString(serdeConstants.STRING_TYPE_NAME)),
             Arrays.asList("customer comment"));
 
-    Assert.assertEquals(expected.asStruct(), schema.asStruct());
+    assertThat(schema.asStruct()).isEqualTo(expected.asStruct());
   }
 
   protected List<FieldSchema> getSupportedFieldSchemas() {
@@ -252,7 +251,7 @@ public class TestHiveSchemaUtil {
    */
   private void checkConvert(TypeInfo typeInfo, Type type) {
     // Convert to TypeInfo
-    Assert.assertEquals(typeInfo, HiveSchemaUtil.convert(type));
+    assertThat(HiveSchemaUtil.convert(type)).isEqualTo(typeInfo);
     // Convert to Type
     assertEquals(type, HiveSchemaUtil.convert(typeInfo));
   }
@@ -265,13 +264,13 @@ public class TestHiveSchemaUtil {
    */
   private void assertEquals(Type expected, Type actual) {
     if (actual.isPrimitiveType()) {
-      Assert.assertEquals(expected, actual);
+      assertThat(actual).isEqualTo(expected);
     } else {
       List<Types.NestedField> expectedFields = ((Type.NestedType) 
expected).fields();
       List<Types.NestedField> actualFields = ((Type.NestedType) 
actual).fields();
       for (int i = 0; i < expectedFields.size(); ++i) {
         assertEquals(expectedFields.get(i).type(), actualFields.get(i).type());
-        Assert.assertEquals(expectedFields.get(i).name(), 
actualFields.get(i).name());
+        
assertThat(actualFields.get(i).name()).isEqualTo(expectedFields.get(i).name());
       }
     }
   }
diff --git 
a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveTableConcurrency.java
 
b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveTableConcurrency.java
index b8630364e8..e7608962cb 100644
--- 
a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveTableConcurrency.java
+++ 
b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveTableConcurrency.java
@@ -21,6 +21,7 @@ package org.apache.iceberg.hive;
 import static org.apache.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS;
 import static org.apache.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS;
 import static org.apache.iceberg.TableProperties.COMMIT_NUM_RETRIES;
+import static org.assertj.core.api.Assertions.assertThat;
 
 import java.util.UUID;
 import java.util.concurrent.ExecutorService;
@@ -32,11 +33,9 @@ import org.apache.iceberg.DataFile;
 import org.apache.iceberg.DataFiles;
 import org.apache.iceberg.FileFormat;
 import org.apache.iceberg.Table;
-import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
 import 
org.apache.iceberg.relocated.com.google.common.util.concurrent.MoreExecutors;
 import org.apache.iceberg.util.Tasks;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestHiveTableConcurrency extends HiveTableBaseTest {
 
@@ -78,7 +77,7 @@ public class TestHiveTableConcurrency extends 
HiveTableBaseTest {
             });
 
     icebergTable.refresh();
-    Assert.assertEquals(20, 
icebergTable.currentSnapshot().allManifests(icebergTable.io()).size());
+    
assertThat(icebergTable.currentSnapshot().allManifests(icebergTable.io())).hasSize(20);
   }
 
   @Test
@@ -109,7 +108,7 @@ public class TestHiveTableConcurrency extends 
HiveTableBaseTest {
     }
 
     executorService.shutdown();
-    Assert.assertTrue("Timeout", executorService.awaitTermination(3, 
TimeUnit.MINUTES));
-    Assert.assertEquals(7, Iterables.size(icebergTable.snapshots()));
+    assertThat(executorService.awaitTermination(3, 
TimeUnit.MINUTES)).as("Timeout").isTrue();
+    assertThat(icebergTable.snapshots()).hasSize(7);
   }
 }
diff --git 
a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestLoadHiveCatalog.java 
b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestLoadHiveCatalog.java
index 06bd79914f..4886140cde 100644
--- 
a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestLoadHiveCatalog.java
+++ 
b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestLoadHiveCatalog.java
@@ -18,29 +18,30 @@
  */
 package org.apache.iceberg.hive;
 
+import static org.assertj.core.api.Assertions.assertThat;
+
 import java.util.Collections;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.iceberg.CatalogProperties;
 import org.apache.iceberg.CatalogUtil;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 public class TestLoadHiveCatalog {
 
   private static TestHiveMetastore metastore;
 
-  @BeforeClass
+  @BeforeAll
   public static void startMetastore() throws Exception {
     HiveConf hiveConf = new HiveConf(TestLoadHiveCatalog.class);
     metastore = new TestHiveMetastore();
     metastore.start(hiveConf);
   }
 
-  @AfterClass
+  @AfterAll
   public static void stopMetastore() throws Exception {
     if (metastore != null) {
       metastore.stop();
@@ -67,7 +68,7 @@ public class TestLoadHiveCatalog {
 
     CachedClientPool clientPool1 = (CachedClientPool) 
hiveCatalog1.clientPool();
     CachedClientPool clientPool2 = (CachedClientPool) 
hiveCatalog2.clientPool();
-    Assert.assertSame(clientPool1.clientPool(), clientPool2.clientPool());
+    assertThat(clientPool2.clientPool()).isSameAs(clientPool1.clientPool());
 
     Configuration conf1 = new Configuration(metastore.hiveConf());
     Configuration conf2 = new Configuration(metastore.hiveConf());
@@ -89,7 +90,7 @@ public class TestLoadHiveCatalog {
                 conf2);
     clientPool1 = (CachedClientPool) hiveCatalog1.clientPool();
     clientPool2 = (CachedClientPool) hiveCatalog2.clientPool();
-    Assert.assertSame(clientPool1.clientPool(), clientPool2.clientPool());
+    assertThat(clientPool2.clientPool()).isSameAs(clientPool1.clientPool());
 
     conf2.set("any.key", "any.value2");
     hiveCatalog2 =
@@ -100,6 +101,6 @@ public class TestLoadHiveCatalog {
                 ImmutableMap.of(CatalogProperties.CLIENT_POOL_CACHE_KEYS, 
"conf:any.key"),
                 conf2);
     clientPool2 = (CachedClientPool) hiveCatalog2.clientPool();
-    Assert.assertNotSame(clientPool1.clientPool(), clientPool2.clientPool());
+    assertThat(clientPool2.clientPool()).isNotSameAs(clientPool1.clientPool());
   }
 }

Reply via email to