This is an automated email from the ASF dual-hosted git repository.

mimaison pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 5624bc7c7e4 MINOR: Remove hamcrest from connect:runtime, raft and 
server-common (#17394)
5624bc7c7e4 is described below

commit 5624bc7c7e43339eb64e18491ac5039324e50678
Author: Mickael Maison <[email protected]>
AuthorDate: Tue Oct 8 14:22:45 2024 +0200

    MINOR: Remove hamcrest from connect:runtime, raft and server-common (#17394)
    
    
    Reviewers: David Arthur <[email protected]>
---
 build.gradle                                       |   4 -
 .../integration/ConnectWorkerIntegrationTest.java  |   6 +-
 .../integration/OffsetsApiIntegrationTest.java     |  54 ++++++-----
 .../connect/runtime/SourceConnectorConfigTest.java |  10 +-
 .../apache/kafka/connect/runtime/WorkerTest.java   |  23 ++---
 .../kafka/connect/runtime/WorkerTestUtils.java     |   8 +-
 .../kafka/connect/storage/OffsetUtilsTest.java     |  13 ++-
 .../apache/kafka/connect/util/SinkUtilsTest.java   |  18 ++--
 .../kafka/connect/util/TopicCreationTest.java      | 102 ++++++++++-----------
 .../org/apache/kafka/raft/KafkaRaftClientTest.java |   7 +-
 .../apache/kafka/timeline/TimelineHashMapTest.java |   9 +-
 .../apache/kafka/timeline/TimelineHashSetTest.java |  20 ++--
 12 files changed, 116 insertions(+), 158 deletions(-)

diff --git a/build.gradle b/build.gradle
index 242143794e4..06fe8fd6bcd 100644
--- a/build.gradle
+++ b/build.gradle
@@ -1384,7 +1384,6 @@ project(':metadata') {
     compileOnly libs.reload4j
     testImplementation libs.junitJupiter
     testImplementation libs.jqwik
-    testImplementation libs.hamcrest
     testImplementation libs.mockitoCore
     testImplementation libs.slf4jReload4j
     testImplementation project(':clients').sourceSets.test.output
@@ -2060,7 +2059,6 @@ project(':raft') {
     testImplementation libs.junitJupiter
     testImplementation libs.mockitoCore
     testImplementation libs.jqwik
-    testImplementation libs.hamcrest
 
     testRuntimeOnly runtimeTestLibs
 
@@ -2155,7 +2153,6 @@ project(':server-common') {
     testImplementation project(':clients').sourceSets.test.output
     testImplementation libs.junitJupiter
     testImplementation libs.mockitoCore
-    testImplementation libs.hamcrest
 
     testRuntimeOnly runtimeTestLibs
   }
@@ -3548,7 +3545,6 @@ project(':connect:runtime') {
 
     testImplementation libs.junitJupiter
     testImplementation libs.mockitoCore
-    testImplementation libs.hamcrest
     testImplementation libs.mockitoJunitJupiter
     testImplementation libs.httpclient
 
diff --git 
a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java
 
b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java
index 845cffb07b0..50e690057ec 100644
--- 
a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java
+++ 
b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java
@@ -105,8 +105,6 @@ import static 
org.apache.kafka.connect.runtime.distributed.DistributedConfig.REB
 import static 
org.apache.kafka.connect.runtime.distributed.DistributedConfig.SCHEDULED_REBALANCE_MAX_DELAY_MS_CONFIG;
 import static 
org.apache.kafka.connect.util.clusters.ConnectAssertions.CONNECTOR_SETUP_DURATION_MS;
 import static org.apache.kafka.test.TestUtils.waitForCondition;
-import static org.hamcrest.CoreMatchers.containsString;
-import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertInstanceOf;
@@ -592,7 +590,7 @@ public class ConnectWorkerIntegrationTest {
             List<LogCaptureAppender.Event> logEvents = 
logCaptureAppender.getEvents();
             assertEquals(1, logEvents.size());
             assertEquals(Level.WARN.toString(), logEvents.get(0).getLevel());
-            assertThat(logEvents.get(0).getMessage(), 
containsString("deprecated"));
+            assertTrue(logEvents.get(0).getMessage().contains("deprecated"));
         }
 
     }
@@ -1042,7 +1040,7 @@ public class ConnectWorkerIntegrationTest {
                 maxTasks
         );
         String errorMessage = 
connect.connectorStatus(CONNECTOR_NAME).connector().trace();
-        assertThat(errorMessage, containsString(expectedErrorSnippet));
+        assertTrue(errorMessage.contains(expectedErrorSnippet));
 
         // Stop all workers in the cluster
         connect.workers().forEach(connect::removeWorker);
diff --git 
a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java
 
b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java
index 0f013129c75..bd339b9a754 100644
--- 
a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java
+++ 
b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java
@@ -69,8 +69,6 @@ import static 
org.apache.kafka.connect.runtime.WorkerConfig.KEY_CONVERTER_CLASS_
 import static 
org.apache.kafka.connect.runtime.WorkerConfig.OFFSET_COMMIT_INTERVAL_MS_CONFIG;
 import static 
org.apache.kafka.connect.runtime.WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG;
 import static org.apache.kafka.test.TestUtils.waitForCondition;
-import static org.hamcrest.CoreMatchers.containsString;
-import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -408,7 +406,7 @@ public class OffsetsApiIntegrationTest {
         // Alter the sink connector's offsets, with retry logic (since we just 
stopped the connector)
         String response = modifySinkConnectorOffsetsWithRetry(new 
ConnectorOffsets(offsetsToAlter));
 
-        assertThat(response, containsString("The Connect framework-managed 
offsets for this connector have been altered successfully. " +
+        assertTrue(response.contains("The Connect framework-managed offsets 
for this connector have been altered successfully. " +
                 "However, if this connector manages offsets externally, they 
will need to be manually altered in the system that the connector uses."));
 
         verifyExpectedSinkConnectorOffsets(connectorName, topic, numPartitions 
- 1, 5,
@@ -428,7 +426,7 @@ public class OffsetsApiIntegrationTest {
         }
 
         response = connect.alterConnectorOffsets(connectorName, new 
ConnectorOffsets(offsetsToAlter));
-        assertThat(response, containsString("The offsets for this connector 
have been altered successfully"));
+        assertTrue(response.contains("The offsets for this connector have been 
altered successfully"));
 
         verifyExpectedSinkConnectorOffsets(connectorName, topic, numPartitions 
- 1, 3,
                 "Sink connector consumer group offsets should reflect the 
altered offsets");
@@ -478,7 +476,7 @@ public class OffsetsApiIntegrationTest {
 
         ConnectRestException e = assertThrows(ConnectRestException.class,
                 () -> connect.alterConnectorOffsets(connectorName, new 
ConnectorOffsets(offsetsToAlter)));
-        assertThat(e.getMessage(), containsString("zombie sink task"));
+        assertTrue(e.getMessage().contains("zombie sink task"));
 
         // clean up blocked threads created while testing zombie task scenarios
         BlockingConnectorTest.Block.reset();
@@ -500,49 +498,49 @@ public class OffsetsApiIntegrationTest {
         String content = "{}";
         try (Response response = connect.requestPatch(url, content)) {
             assertEquals(400, response.getStatus());
-            assertThat(response.getEntity().toString(), 
containsString("Partitions / offsets need to be provided for an alter offsets 
request"));
+            assertTrue(response.getEntity().toString().contains("Partitions / 
offsets need to be provided for an alter offsets request"));
         }
 
         content = "{\"offsets\": []}";
         try (Response response = connect.requestPatch(url, content)) {
             assertEquals(400, response.getStatus());
-            assertThat(response.getEntity().toString(), 
containsString("Partitions / offsets need to be provided for an alter offsets 
request"));
+            assertTrue(response.getEntity().toString().contains("Partitions / 
offsets need to be provided for an alter offsets request"));
         }
 
         content = "{\"offsets\": [{}]}";
         try (Response response = connect.requestPatch(url, content)) {
             assertEquals(400, response.getStatus());
-            assertThat(response.getEntity().toString(), containsString("The 
partition for a sink connector offset cannot be null or missing"));
+            assertTrue(response.getEntity().toString().contains("The partition 
for a sink connector offset cannot be null or missing"));
         }
 
         content = "{\"offsets\": [{\"partition\": null, \"offset\": null}]}";
         try (Response response = connect.requestPatch(url, content)) {
             assertEquals(400, response.getStatus());
-            assertThat(response.getEntity().toString(), containsString("The 
partition for a sink connector offset cannot be null or missing"));
+            assertTrue(response.getEntity().toString().contains("The partition 
for a sink connector offset cannot be null or missing"));
         }
 
         content = "{\"offsets\": [{\"partition\": {}, \"offset\": null}]}";
         try (Response response = connect.requestPatch(url, content)) {
             assertEquals(400, response.getStatus());
-            assertThat(response.getEntity().toString(), containsString("The 
partition for a sink connector offset must contain the keys 'kafka_topic' and 
'kafka_partition'"));
+            assertTrue(response.getEntity().toString().contains("The partition 
for a sink connector offset must contain the keys 'kafka_topic' and 
'kafka_partition'"));
         }
 
         content = "{\"offsets\": [{\"partition\": {\"kafka_topic\": \"test\", 
\"kafka_partition\": \"not a number\"}, \"offset\": null}]}";
         try (Response response = connect.requestPatch(url, content)) {
             assertEquals(400, response.getStatus());
-            assertThat(response.getEntity().toString(), 
containsString("Partition values for sink connectors need to be integers"));
+            assertTrue(response.getEntity().toString().contains("Partition 
values for sink connectors need to be integers"));
         }
 
         content = "{\"offsets\": [{\"partition\": {\"kafka_topic\": \"test\", 
\"kafka_partition\": 1}, \"offset\": {}}]}";
         try (Response response = connect.requestPatch(url, content)) {
             assertEquals(400, response.getStatus());
-            assertThat(response.getEntity().toString(), containsString("The 
offset for a sink connector should either be null or contain the key 
'kafka_offset'"));
+            assertTrue(response.getEntity().toString().contains("The offset 
for a sink connector should either be null or contain the key 'kafka_offset'"));
         }
 
         content = "{\"offsets\": [{\"partition\": {\"kafka_topic\": \"test\", 
\"kafka_partition\": 1}, \"offset\": {\"kafka_offset\": \"not a number\"}}]}";
         try (Response response = connect.requestPatch(url, content)) {
             assertEquals(400, response.getStatus());
-            assertThat(response.getEntity().toString(), containsString("Offset 
values for sink connectors need to be integers"));
+            assertTrue(response.getEntity().toString().contains("Offset values 
for sink connectors need to be integers"));
         }
     }
 
@@ -607,7 +605,7 @@ public class OffsetsApiIntegrationTest {
         }
 
         String response = connect.alterConnectorOffsets(connectorName, new 
ConnectorOffsets(offsetsToAlter));
-        assertThat(response, containsString("The Connect framework-managed 
offsets for this connector have been altered successfully. " +
+        assertTrue(response.contains("The Connect framework-managed offsets 
for this connector have been altered successfully. " +
                 "However, if this connector manages offsets externally, they 
will need to be manually altered in the system that the connector uses."));
 
         verifyExpectedSourceConnectorOffsets(connectorName, NUM_TASKS, 5,
@@ -628,7 +626,7 @@ public class OffsetsApiIntegrationTest {
         }
 
         response = connect.alterConnectorOffsets(connectorName, new 
ConnectorOffsets(offsetsToAlter));
-        assertThat(response, containsString("The offsets for this connector 
have been altered successfully"));
+        assertTrue(response.contains("The offsets for this connector have been 
altered successfully"));
 
         verifyExpectedSourceConnectorOffsets(connectorName, NUM_TASKS, 7,
                 "Source connector offsets should reflect the altered offsets");
@@ -660,49 +658,49 @@ public class OffsetsApiIntegrationTest {
         String content = "[]";
         try (Response response = connect.requestPatch(url, content)) {
             assertEquals(500, response.getStatus());
-            assertThat(response.getEntity().toString(), containsString("Cannot 
deserialize value"));
+            assertTrue(response.getEntity().toString().contains("Cannot 
deserialize value"));
         }
 
         content = "{}";
         try (Response response = connect.requestPatch(url, content)) {
             assertEquals(400, response.getStatus());
-            assertThat(response.getEntity().toString(), 
containsString("Partitions / offsets need to be provided for an alter offsets 
request"));
+            assertTrue(response.getEntity().toString().contains("Partitions / 
offsets need to be provided for an alter offsets request"));
         }
 
         content = "{\"key\": []}";
         try (Response response = connect.requestPatch(url, content)) {
             assertEquals(500, response.getStatus());
-            assertThat(response.getEntity().toString(), 
containsString("Unrecognized field"));
+            assertTrue(response.getEntity().toString().contains("Unrecognized 
field"));
         }
 
         content = "{\"offsets\": []}";
         try (Response response = connect.requestPatch(url, content)) {
             assertEquals(400, response.getStatus());
-            assertThat(response.getEntity().toString(), 
containsString("Partitions / offsets need to be provided for an alter offsets 
request"));
+            assertTrue(response.getEntity().toString().contains("Partitions / 
offsets need to be provided for an alter offsets request"));
         }
 
         content = "{\"offsets\": {}}";
         try (Response response = connect.requestPatch(url, content)) {
             assertEquals(500, response.getStatus());
-            assertThat(response.getEntity().toString(), containsString("Cannot 
deserialize value"));
+            assertTrue(response.getEntity().toString().contains("Cannot 
deserialize value"));
         }
 
         content = "{\"offsets\": [123]}";
         try (Response response = connect.requestPatch(url, content)) {
             assertEquals(500, response.getStatus());
-            assertThat(response.getEntity().toString(), containsString("Cannot 
construct instance"));
+            assertTrue(response.getEntity().toString().contains("Cannot 
construct instance"));
         }
 
         content = "{\"offsets\": [{\"key\": \"val\"}]}";
         try (Response response = connect.requestPatch(url, content)) {
             assertEquals(500, response.getStatus());
-            assertThat(response.getEntity().toString(), 
containsString("Unrecognized field"));
+            assertTrue(response.getEntity().toString().contains("Unrecognized 
field"));
         }
 
         content = "{\"offsets\": [{\"partition\": []]}]}";
         try (Response response = connect.requestPatch(url, content)) {
             assertEquals(500, response.getStatus());
-            assertThat(response.getEntity().toString(), containsString("Cannot 
deserialize value"));
+            assertTrue(response.getEntity().toString().contains("Cannot 
deserialize value"));
         }
     }
 
@@ -771,14 +769,14 @@ public class OffsetsApiIntegrationTest {
 
         // Reset the sink connector's offsets, with retry logic (since we just 
stopped the connector)
         String response = modifySinkConnectorOffsetsWithRetry(null);
-        assertThat(response, containsString("The Connect framework-managed 
offsets for this connector have been reset successfully. " +
+        assertTrue(response.contains("The Connect framework-managed offsets 
for this connector have been reset successfully. " +
                 "However, if this connector manages offsets externally, they 
will need to be manually reset in the system that the connector uses."));
 
         verifyEmptyConnectorOffsets(connectorName);
 
         // Reset the sink connector's offsets again while it is still in a 
STOPPED state and ensure that there is no error
         response = connect.resetConnectorOffsets(connectorName);
-        assertThat(response, containsString("The Connect framework-managed 
offsets for this connector have been reset successfully. " +
+        assertTrue(response.contains("The Connect framework-managed offsets 
for this connector have been reset successfully. " +
                 "However, if this connector manages offsets externally, they 
will need to be manually reset in the system that the connector uses."));
 
         verifyEmptyConnectorOffsets(connectorName);
@@ -822,7 +820,7 @@ public class OffsetsApiIntegrationTest {
 
         // Try to reset the offsets
         ConnectRestException e = assertThrows(ConnectRestException.class, () 
-> connect.resetConnectorOffsets(connectorName));
-        assertThat(e.getMessage(), containsString("zombie sink task"));
+        assertTrue(e.getMessage().contains("zombie sink task"));
 
         // clean up blocked threads created while testing zombie task scenarios
         BlockingConnectorTest.Block.reset();
@@ -881,14 +879,14 @@ public class OffsetsApiIntegrationTest {
 
         // Reset the source connector's offsets
         String response = connect.resetConnectorOffsets(connectorName);
-        assertThat(response, containsString("The Connect framework-managed 
offsets for this connector have been reset successfully. " +
+        assertTrue(response.contains("The Connect framework-managed offsets 
for this connector have been reset successfully. " +
                 "However, if this connector manages offsets externally, they 
will need to be manually reset in the system that the connector uses."));
 
         verifyEmptyConnectorOffsets(connectorName);
 
         // Reset the source connector's offsets again while it is still in a 
STOPPED state and ensure that there is no error
         response = connect.resetConnectorOffsets(connectorName);
-        assertThat(response, containsString("The Connect framework-managed 
offsets for this connector have been reset successfully. " +
+        assertTrue(response.contains("The Connect framework-managed offsets 
for this connector have been reset successfully. " +
                 "However, if this connector manages offsets externally, they 
will need to be manually reset in the system that the connector uses."));
 
         verifyEmptyConnectorOffsets(connectorName);
diff --git 
a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SourceConnectorConfigTest.java
 
b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SourceConnectorConfigTest.java
index b3ad1db30e7..e8e8fbcbdd8 100644
--- 
a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SourceConnectorConfigTest.java
+++ 
b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SourceConnectorConfigTest.java
@@ -39,8 +39,6 @@ import static 
org.apache.kafka.connect.runtime.TopicCreationConfig.DEFAULT_TOPIC
 import static 
org.apache.kafka.connect.runtime.TopicCreationConfig.DEFAULT_TOPIC_CREATION_PREFIX;
 import static 
org.apache.kafka.connect.runtime.TopicCreationConfig.PARTITIONS_CONFIG;
 import static 
org.apache.kafka.connect.runtime.TopicCreationConfig.REPLICATION_FACTOR_CONFIG;
-import static org.hamcrest.CoreMatchers.containsString;
-import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -90,13 +88,13 @@ public class SourceConnectorConfigTest {
         Map<String, String> props = defaultConnectorPropsWithTopicCreation();
         props.put(DEFAULT_TOPIC_CREATION_PREFIX + PARTITIONS_CONFIG, 
String.valueOf(0));
         Exception e = assertThrows(ConfigException.class, () -> new 
SourceConnectorConfig(MOCK_PLUGINS, props, true));
-        assertThat(e.getMessage(), containsString("Number of partitions must 
be positive, or -1"));
+        assertTrue(e.getMessage().contains("Number of partitions must be 
positive, or -1"));
 
         props.put(DEFAULT_TOPIC_CREATION_PREFIX + PARTITIONS_CONFIG, 
String.valueOf(DEFAULT_PARTITIONS));
         props.put(DEFAULT_TOPIC_CREATION_PREFIX + REPLICATION_FACTOR_CONFIG, 
String.valueOf(0));
 
         e = assertThrows(ConfigException.class, () -> new 
SourceConnectorConfig(MOCK_PLUGINS, props, true));
-        assertThat(e.getMessage(), containsString("Replication factor must be 
positive and not "
+        assertTrue(e.getMessage().contains("Replication factor must be 
positive and not "
                 + "larger than the number of brokers in the Kafka cluster, or 
-1 to use the "
                 + "broker's default"));
     }
@@ -108,12 +106,12 @@ public class SourceConnectorConfigTest {
             props.put(DEFAULT_TOPIC_CREATION_PREFIX + PARTITIONS_CONFIG, 
String.valueOf(i));
             props.put(DEFAULT_TOPIC_CREATION_PREFIX + 
REPLICATION_FACTOR_CONFIG, String.valueOf(DEFAULT_REPLICATION_FACTOR));
             Exception e = assertThrows(ConfigException.class, () -> new 
SourceConnectorConfig(MOCK_PLUGINS, props, true));
-            assertThat(e.getMessage(), containsString("Number of partitions 
must be positive, or -1"));
+            assertTrue(e.getMessage().contains("Number of partitions must be 
positive, or -1"));
 
             props.put(DEFAULT_TOPIC_CREATION_PREFIX + PARTITIONS_CONFIG, 
String.valueOf(DEFAULT_PARTITIONS));
             props.put(DEFAULT_TOPIC_CREATION_PREFIX + 
REPLICATION_FACTOR_CONFIG, String.valueOf(i));
             e = assertThrows(ConfigException.class, () -> new 
SourceConnectorConfig(MOCK_PLUGINS, props, true));
-            assertThat(e.getMessage(), containsString("Replication factor must 
be positive and not "
+            assertTrue(e.getMessage().contains("Replication factor must be 
positive and not "
                     + "larger than the number of brokers in the Kafka cluster, 
or -1 to use the "
                     + "broker's default"));
         }
diff --git 
a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTest.java
 
b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTest.java
index bd7b980b408..11820023f2f 100644
--- 
a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTest.java
+++ 
b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTest.java
@@ -144,9 +144,6 @@ import static 
org.apache.kafka.connect.runtime.distributed.DistributedConfig.GRO
 import static 
org.apache.kafka.connect.runtime.distributed.DistributedConfig.OFFSET_STORAGE_TOPIC_CONFIG;
 import static 
org.apache.kafka.connect.runtime.distributed.DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG;
 import static org.apache.kafka.connect.sink.SinkTask.TOPICS_CONFIG;
-import static org.hamcrest.CoreMatchers.containsString;
-import static org.hamcrest.CoreMatchers.instanceOf;
-import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertInstanceOf;
@@ -343,12 +340,8 @@ public class WorkerTest {
 
         FutureCallback<TargetState> onSecondStart = new FutureCallback<>();
         worker.startConnector(CONNECTOR_ID, connectorProps, ctx, 
connectorStatusListener, TargetState.STARTED, onSecondStart);
-        try {
-            onSecondStart.get(0, TimeUnit.MILLISECONDS);
-            fail("Should have failed while trying to start second connector 
with same name");
-        } catch (ExecutionException e) {
-            assertThat(e.getCause(), instanceOf(ConnectException.class));
-        }
+        Exception exc = assertThrows(ExecutionException.class, () -> 
onSecondStart.get(0, TimeUnit.MILLISECONDS));
+        assertInstanceOf(ConnectException.class, exc.getCause());
 
         assertStatistics(worker, 1, 0);
         assertStartupStatistics(worker, 1, 0, 0, 0);
@@ -565,12 +558,8 @@ public class WorkerTest {
 
         FutureCallback<TargetState> onSecondStart = new FutureCallback<>();
         worker.startConnector(CONNECTOR_ID, connectorProps, ctx, 
connectorStatusListener, TargetState.STARTED, onSecondStart);
-        try {
-            onSecondStart.get(0, TimeUnit.MILLISECONDS);
-            fail("Should have failed while trying to start second connector 
with same name");
-        } catch (ExecutionException e) {
-            assertThat(e.getCause(), instanceOf(ConnectException.class));
-        }
+        Exception exc = assertThrows(ExecutionException.class, () -> 
onSecondStart.get(0, TimeUnit.MILLISECONDS));
+        assertInstanceOf(ConnectException.class, exc.getCause());
 
         Map<String, String> connProps = new HashMap<>(connectorProps);
         connProps.put(ConnectorConfig.TASKS_MAX_CONFIG, "2");
@@ -2631,7 +2620,7 @@ public class WorkerTest {
                 offsetWriter, Thread.currentThread().getContextClassLoader(), 
cb);
         ExecutionException e = assertThrows(ExecutionException.class, () -> 
cb.get(1000, TimeUnit.MILLISECONDS).message());
         assertEquals(ConnectException.class, e.getCause().getClass());
-        assertThat(e.getCause().getMessage(), containsString("Timed out"));
+        assertTrue(e.getCause().getMessage().contains("Timed out"));
 
         verify(offsetStore).start();
         verify(offsetStore, timeout(1000)).stop();
@@ -2664,7 +2653,7 @@ public class WorkerTest {
                 Thread.currentThread().getContextClassLoader(), cb);
         ExecutionException e = assertThrows(ExecutionException.class, () -> 
cb.get(1000, TimeUnit.MILLISECONDS).message());
         assertEquals(ConnectException.class, e.getCause().getClass());
-        assertThat(e.getCause().getMessage(), containsString("Timed out"));
+        assertTrue(e.getCause().getMessage().contains("Timed out"));
 
         verify(admin, timeout(1000)).close();
         verifyKafkaClusterId();
diff --git 
a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTestUtils.java
 
b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTestUtils.java
index f85e7e9bb0b..462d02f3e6d 100644
--- 
a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTestUtils.java
+++ 
b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTestUtils.java
@@ -31,8 +31,6 @@ import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
 import static 
org.apache.kafka.connect.runtime.distributed.WorkerCoordinator.WorkerLoad;
-import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 
@@ -187,14 +185,12 @@ public class WorkerTestUtils {
 
         assertEquals(expectedOffset, assignment.offset(), "Wrong offset in " + 
assignment);
 
-        assertThat("Wrong set of assigned connectors in " + assignment,
-                assignment.connectors(), is(expectedAssignedConnectors));
+        assertEquals(expectedAssignedConnectors, assignment.connectors(), 
"Wrong set of assigned connectors in " + assignment);
 
         assertEquals(expectedAssignedTaskNum, assignment.tasks().size(),
                 "Wrong number of assigned tasks in " + assignment);
 
-        assertThat("Wrong set of revoked connectors in " + assignment,
-                assignment.revokedConnectors(), is(expectedRevokedConnectors));
+        assertEquals(expectedRevokedConnectors, 
assignment.revokedConnectors(), "Wrong set of revoked connectors in " + 
assignment);
 
         assertEquals(expectedRevokedTaskNum, assignment.revokedTasks().size(),
                 "Wrong number of revoked tasks in " + assignment);
diff --git 
a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/OffsetUtilsTest.java
 
b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/OffsetUtilsTest.java
index 239add620f6..d4f0cf45203 100644
--- 
a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/OffsetUtilsTest.java
+++ 
b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/OffsetUtilsTest.java
@@ -31,10 +31,9 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
 
-import static org.hamcrest.CoreMatchers.containsString;
-import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class OffsetUtilsTest {
 
@@ -47,7 +46,7 @@ public class OffsetUtilsTest {
     @Test
     public void testValidateFormatNotMap() {
         DataException e = assertThrows(DataException.class, () -> 
OffsetUtils.validateFormat(new Object()));
-        assertThat(e.getMessage(), containsString("Offsets must be specified 
as a Map"));
+        assertTrue(e.getMessage().contains("Offsets must be specified as a 
Map"));
     }
 
     @Test
@@ -56,18 +55,18 @@ public class OffsetUtilsTest {
         offsetData.put("k1", "v1");
         offsetData.put(1, "v2");
         DataException e = assertThrows(DataException.class, () -> 
OffsetUtils.validateFormat(offsetData));
-        assertThat(e.getMessage(), containsString("Offsets may only use String 
keys"));
+        assertTrue(e.getMessage().contains("Offsets may only use String 
keys"));
     }
 
     @Test
     public void testValidateFormatMapWithNonPrimitiveKeys() {
         Map<Object, Object> offsetData = Collections.singletonMap("key", new 
Object());
         DataException e = assertThrows(DataException.class, () -> 
OffsetUtils.validateFormat(offsetData));
-        assertThat(e.getMessage(), containsString("Offsets may only contain 
primitive types as values"));
+        assertTrue(e.getMessage().contains("Offsets may only contain primitive 
types as values"));
 
         Map<Object, Object> offsetData2 = Collections.singletonMap("key", new 
ArrayList<>());
         e = assertThrows(DataException.class, () -> 
OffsetUtils.validateFormat(offsetData2));
-        assertThat(e.getMessage(), containsString("Offsets may only contain 
primitive types as values"));
+        assertTrue(e.getMessage().contains("Offsets may only contain primitive 
types as values"));
     }
 
     @Test
@@ -121,7 +120,7 @@ public class OffsetUtilsTest {
             // Expect no partition to be added to the map since the partition 
key is of an invalid format
             assertEquals(0, connectorPartitions.size());
             assertEquals(1, logCaptureAppender.getMessages().size());
-            assertThat(logCaptureAppender.getMessages().get(0), 
containsString(message));
+            
assertTrue(logCaptureAppender.getMessages().get(0).contains(message));
         }
     }
 
diff --git 
a/connect/runtime/src/test/java/org/apache/kafka/connect/util/SinkUtilsTest.java
 
b/connect/runtime/src/test/java/org/apache/kafka/connect/util/SinkUtilsTest.java
index 0c91e4ff310..268b27e19d8 100644
--- 
a/connect/runtime/src/test/java/org/apache/kafka/connect/util/SinkUtilsTest.java
+++ 
b/connect/runtime/src/test/java/org/apache/kafka/connect/util/SinkUtilsTest.java
@@ -27,8 +27,6 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 
-import static org.hamcrest.CoreMatchers.containsString;
-import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -72,18 +70,18 @@ public class SinkUtilsTest {
 
         // missing partition key
         ConnectException e = assertThrows(ConnectException.class, () -> 
SinkUtils.parseSinkConnectorOffsets(partitionOffsets));
-        assertThat(e.getMessage(), containsString("The partition for a sink 
connector offset must contain the keys 'kafka_topic' and 'kafka_partition'"));
+        assertTrue(e.getMessage().contains("The partition for a sink connector 
offset must contain the keys 'kafka_topic' and 'kafka_partition'"));
 
         partition.put(SinkUtils.KAFKA_PARTITION_KEY, "not a number");
         // bad partition key
         e = assertThrows(ConnectException.class, () -> 
SinkUtils.parseSinkConnectorOffsets(partitionOffsets));
-        assertThat(e.getMessage(), containsString("Failed to parse the 
following Kafka partition value in the provided offsets: 'not a number'"));
+        assertTrue(e.getMessage().contains("Failed to parse the following 
Kafka partition value in the provided offsets: 'not a number'"));
 
         partition.remove(SinkUtils.KAFKA_TOPIC_KEY);
         partition.put(SinkUtils.KAFKA_PARTITION_KEY, "5");
         // missing topic key
         e = assertThrows(ConnectException.class, () -> 
SinkUtils.parseSinkConnectorOffsets(partitionOffsets));
-        assertThat(e.getMessage(), containsString("The partition for a sink 
connector offset must contain the keys 'kafka_topic' and 'kafka_partition'"));
+        assertTrue(e.getMessage().contains("The partition for a sink connector 
offset must contain the keys 'kafka_topic' and 'kafka_partition'"));
     }
 
     @Test
@@ -97,12 +95,12 @@ public class SinkUtilsTest {
 
         // missing offset key
         ConnectException e = assertThrows(ConnectException.class, () -> 
SinkUtils.parseSinkConnectorOffsets(partitionOffsets));
-        assertThat(e.getMessage(), containsString("The offset for a sink 
connector should either be null or contain the key 'kafka_offset'"));
+        assertTrue(e.getMessage().contains("The offset for a sink connector 
should either be null or contain the key 'kafka_offset'"));
 
         // bad offset key
         offset.put(SinkUtils.KAFKA_OFFSET_KEY, "not a number");
         e = assertThrows(ConnectException.class, () -> 
SinkUtils.parseSinkConnectorOffsets(partitionOffsets));
-        assertThat(e.getMessage(), containsString("Failed to parse the 
following Kafka offset value in the provided offsets: 'not a number'"));
+        assertTrue(e.getMessage().contains("Failed to parse the following 
Kafka offset value in the provided offsets: 'not a number'"));
     }
 
     @Test
@@ -160,7 +158,7 @@ public class SinkUtilsTest {
         Map<Map<String, ?>, Map<String, ?>> partitionOffsets = new HashMap<>();
         partitionOffsets.put(null, offset);
         ConnectException e = assertThrows(ConnectException.class, () -> 
SinkUtils.parseSinkConnectorOffsets(partitionOffsets));
-        assertThat(e.getMessage(), containsString("The partition for a sink 
connector offset cannot be null or missing"));
+        assertTrue(e.getMessage().contains("The partition for a sink connector 
offset cannot be null or missing"));
 
         Map<String, Object> partitionMap = new HashMap<>();
         partitionMap.put(SinkUtils.KAFKA_TOPIC_KEY, "topic");
@@ -169,14 +167,14 @@ public class SinkUtilsTest {
         partitionOffsets.put(partitionMap, offset);
 
         e = assertThrows(ConnectException.class, () -> 
SinkUtils.parseSinkConnectorOffsets(partitionOffsets));
-        assertThat(e.getMessage(), containsString("Kafka partitions must be 
valid numbers and may not be null"));
+        assertTrue(e.getMessage().contains("Kafka partitions must be valid 
numbers and may not be null"));
     }
 
     @Test
     public void testNullTopic() {
         Map<Map<String, ?>, Map<String, ?>> partitionOffsets = 
createPartitionOffsetMap(null, "10", 100);
         ConnectException e = assertThrows(ConnectException.class, () -> 
SinkUtils.parseSinkConnectorOffsets(partitionOffsets));
-        assertThat(e.getMessage(), containsString("Kafka topic names must be 
valid strings and may not be null"));
+        assertTrue(e.getMessage().contains("Kafka topic names must be valid 
strings and may not be null"));
     }
 
     private Map<Map<String, ?>, Map<String, ?>> 
createPartitionOffsetMap(String topic, Object partition, Object offset) {
diff --git 
a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicCreationTest.java
 
b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicCreationTest.java
index 580488a4732..5d764d16031 100644
--- 
a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicCreationTest.java
+++ 
b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicCreationTest.java
@@ -31,8 +31,10 @@ import org.apache.kafka.connect.transforms.RegexRouter;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
@@ -60,10 +62,6 @@ import static 
org.apache.kafka.connect.runtime.distributed.DistributedConfig.CON
 import static 
org.apache.kafka.connect.runtime.distributed.DistributedConfig.GROUP_ID_CONFIG;
 import static 
org.apache.kafka.connect.runtime.distributed.DistributedConfig.OFFSET_STORAGE_TOPIC_CONFIG;
 import static 
org.apache.kafka.connect.runtime.distributed.DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG;
-import static org.hamcrest.CoreMatchers.hasItem;
-import static org.hamcrest.CoreMatchers.hasItems;
-import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNull;
@@ -132,9 +130,9 @@ public class TopicCreationTest {
 
         assertTrue(topicCreation.isTopicCreationEnabled());
         assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC));
-        assertThat(topicCreation.defaultTopicGroup(), 
is(groups.get(DEFAULT_TOPIC_CREATION_GROUP)));
+        assertEquals(topicCreation.defaultTopicGroup(), 
groups.get(DEFAULT_TOPIC_CREATION_GROUP));
         assertEquals(2, topicCreation.topicGroups().size());
-        assertThat(topicCreation.topicGroups().keySet(), hasItems(FOO_GROUP, 
BAR_GROUP));
+        assertEquals(new HashSet<>(Arrays.asList(FOO_GROUP, BAR_GROUP)), 
topicCreation.topicGroups().keySet());
         assertEquals(topicCreation.defaultTopicGroup(), 
topicCreation.findFirstGroup(FOO_TOPIC));
         topicCreation.addTopic(FOO_TOPIC);
         assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC));
@@ -153,7 +151,7 @@ public class TopicCreationTest {
         assertFalse(topicCreation.isTopicCreationEnabled());
         assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC));
         assertNull(topicCreation.defaultTopicGroup());
-        assertThat(topicCreation.topicGroups(), is(Collections.emptyMap()));
+        assertEquals(Collections.emptyMap(), topicCreation.topicGroups());
         assertNull(topicCreation.findFirstGroup(FOO_TOPIC));
         topicCreation.addTopic(FOO_TOPIC);
         assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC));
@@ -168,7 +166,7 @@ public class TopicCreationTest {
         assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC));
         assertNull(topicCreation.defaultTopicGroup());
         assertEquals(0, topicCreation.topicGroups().size());
-        assertThat(topicCreation.topicGroups(), is(Collections.emptyMap()));
+        assertEquals(Collections.emptyMap(), topicCreation.topicGroups());
         assertNull(topicCreation.findFirstGroup(FOO_TOPIC));
         topicCreation.addTopic(FOO_TOPIC);
         assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC));
@@ -186,14 +184,14 @@ public class TopicCreationTest {
         assertTrue(sourceConfig.usesTopicCreation());
         assertEquals(DEFAULT_REPLICATION_FACTOR, (short) 
sourceConfig.topicCreationReplicationFactor(DEFAULT_TOPIC_CREATION_GROUP));
         assertEquals(DEFAULT_PARTITIONS, (int) 
sourceConfig.topicCreationPartitions(DEFAULT_TOPIC_CREATION_GROUP));
-        
assertThat(sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP), 
is(Collections.singletonList(".*")));
-        
assertThat(sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP), 
is(Collections.emptyList()));
-        
assertThat(sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP),
 is(Collections.emptyMap()));
+        assertEquals(Collections.singletonList(".*"), 
sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP));
+        assertEquals(Collections.emptyList(), 
sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP));
+        assertEquals(Collections.emptyMap(), 
sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP));
 
         // verify topic creation group is instantiated correctly
         Map<String, TopicCreationGroup> groups = 
TopicCreationGroup.configuredGroups(sourceConfig);
         assertEquals(1, groups.size());
-        assertThat(groups.keySet(), hasItem(DEFAULT_TOPIC_CREATION_GROUP));
+        assertEquals(Collections.singleton(DEFAULT_TOPIC_CREATION_GROUP), 
groups.keySet());
 
         // verify topic creation
         TopicCreation topicCreation = 
TopicCreation.newTopicCreation(workerConfig, groups);
@@ -204,7 +202,7 @@ public class TopicCreationTest {
         assertEquals(DEFAULT_TOPIC_CREATION_GROUP, group.name());
         assertTrue(topicCreation.isTopicCreationEnabled());
         assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC));
-        assertThat(topicCreation.topicGroups(), is(Collections.emptyMap()));
+        assertEquals(Collections.emptyMap(), topicCreation.topicGroups());
         assertEquals(topicCreation.defaultTopicGroup(), 
topicCreation.findFirstGroup(FOO_TOPIC));
         topicCreation.addTopic(FOO_TOPIC);
         assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC));
@@ -214,7 +212,7 @@ public class TopicCreationTest {
         assertEquals(FOO_TOPIC, topicSpec.name());
         assertEquals(DEFAULT_REPLICATION_FACTOR, 
topicSpec.replicationFactor());
         assertEquals(DEFAULT_PARTITIONS, topicSpec.numPartitions());
-        assertThat(topicSpec.configs(), is(Collections.emptyMap()));
+        assertEquals(Collections.emptyMap(), topicSpec.configs());
     }
 
     @Test
@@ -240,14 +238,14 @@ public class TopicCreationTest {
         assertTrue(sourceConfig.usesTopicCreation());
         assertEquals(replicas, (short) 
sourceConfig.topicCreationReplicationFactor(DEFAULT_TOPIC_CREATION_GROUP));
         assertEquals(partitions, (int) 
sourceConfig.topicCreationPartitions(DEFAULT_TOPIC_CREATION_GROUP));
-        
assertThat(sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP), 
is(Collections.singletonList(".*")));
-        
assertThat(sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP), 
is(Collections.emptyList()));
-        
assertThat(sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP),
 is(topicProps));
+        assertEquals(Collections.singletonList(".*"), 
sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP));
+        assertEquals(Collections.emptyList(), 
sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP));
+        assertEquals(topicProps, 
sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP));
 
         // verify topic creation group is instantiated correctly
         Map<String, TopicCreationGroup> groups = 
TopicCreationGroup.configuredGroups(sourceConfig);
         assertEquals(1, groups.size());
-        assertThat(groups.keySet(), hasItem(DEFAULT_TOPIC_CREATION_GROUP));
+        assertEquals(Collections.singleton(DEFAULT_TOPIC_CREATION_GROUP), 
groups.keySet());
 
         // verify topic creation
         TopicCreation topicCreation = 
TopicCreation.newTopicCreation(workerConfig, groups);
@@ -258,7 +256,7 @@ public class TopicCreationTest {
         assertEquals(DEFAULT_TOPIC_CREATION_GROUP, group.name());
         assertTrue(topicCreation.isTopicCreationEnabled());
         assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC));
-        assertThat(topicCreation.topicGroups(), is(Collections.emptyMap()));
+        assertEquals(Collections.emptyMap(), topicCreation.topicGroups());
         assertEquals(topicCreation.defaultTopicGroup(), 
topicCreation.findFirstGroup(FOO_TOPIC));
         topicCreation.addTopic(FOO_TOPIC);
         assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC));
@@ -268,7 +266,7 @@ public class TopicCreationTest {
         assertEquals(FOO_TOPIC, topicSpec.name());
         assertEquals(replicas, topicSpec.replicationFactor());
         assertEquals(partitions, topicSpec.numPartitions());
-        assertThat(topicSpec.configs(), is(topicProps));
+        assertEquals(topicProps, topicSpec.configs());
     }
 
     @Test
@@ -291,14 +289,14 @@ public class TopicCreationTest {
         assertTrue(sourceConfig.usesTopicCreation());
         assertEquals(DEFAULT_REPLICATION_FACTOR, (short) 
sourceConfig.topicCreationReplicationFactor(DEFAULT_TOPIC_CREATION_GROUP));
         assertEquals(partitions, (int) 
sourceConfig.topicCreationPartitions(DEFAULT_TOPIC_CREATION_GROUP));
-        
assertThat(sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP), 
is(Collections.singletonList(".*")));
-        
assertThat(sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP), 
is(Collections.emptyList()));
-        
assertThat(sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP),
 is(Collections.emptyMap()));
+        assertEquals(Collections.singletonList(".*"), 
sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP));
+        assertEquals(Collections.emptyList(), 
sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP));
+        assertEquals(Collections.emptyMap(), 
sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP));
 
         // verify topic creation group is instantiated correctly
         Map<String, TopicCreationGroup> groups = 
TopicCreationGroup.configuredGroups(sourceConfig);
         assertEquals(2, groups.size());
-        assertThat(groups.keySet(), hasItems(DEFAULT_TOPIC_CREATION_GROUP, 
FOO_GROUP));
+        assertEquals(new HashSet<>(Arrays.asList(DEFAULT_TOPIC_CREATION_GROUP, 
FOO_GROUP)), groups.keySet());
 
         // verify topic creation
         TopicCreation topicCreation = 
TopicCreation.newTopicCreation(workerConfig, groups);
@@ -317,7 +315,7 @@ public class TopicCreationTest {
         assertTrue(topicCreation.isTopicCreationEnabled());
         assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC));
         assertEquals(1, topicCreation.topicGroups().size());
-        assertThat(topicCreation.topicGroups().keySet(), hasItems(FOO_GROUP));
+        assertEquals(Collections.singleton(FOO_GROUP), 
topicCreation.topicGroups().keySet());
         assertEquals(fooGroup, topicCreation.findFirstGroup(FOO_TOPIC));
         topicCreation.addTopic(FOO_TOPIC);
         assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC));
@@ -327,13 +325,13 @@ public class TopicCreationTest {
         assertEquals(BAR_TOPIC, defaultTopicSpec.name());
         assertEquals(DEFAULT_REPLICATION_FACTOR, 
defaultTopicSpec.replicationFactor());
         assertEquals(partitions, defaultTopicSpec.numPartitions());
-        assertThat(defaultTopicSpec.configs(), is(Collections.emptyMap()));
+        assertEquals(Collections.emptyMap(), defaultTopicSpec.configs());
 
         NewTopic fooTopicSpec = 
topicCreation.findFirstGroup(FOO_TOPIC).newTopic(FOO_TOPIC);
         assertEquals(FOO_TOPIC, fooTopicSpec.name());
         assertEquals(fooReplicas, fooTopicSpec.replicationFactor());
         assertEquals(partitions, fooTopicSpec.numPartitions());
-        assertThat(fooTopicSpec.configs(), is(topicProps));
+        assertEquals(topicProps, fooTopicSpec.configs());
     }
 
     @Test
@@ -356,14 +354,14 @@ public class TopicCreationTest {
         assertTrue(sourceConfig.usesTopicCreation());
         assertEquals(DEFAULT_REPLICATION_FACTOR, (short) 
sourceConfig.topicCreationReplicationFactor(DEFAULT_TOPIC_CREATION_GROUP));
         assertEquals(partitions, (int) 
sourceConfig.topicCreationPartitions(DEFAULT_TOPIC_CREATION_GROUP));
-        
assertThat(sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP), 
is(Collections.singletonList(".*")));
-        
assertThat(sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP), 
is(Collections.emptyList()));
-        
assertThat(sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP),
 is(Collections.emptyMap()));
+        assertEquals(Collections.singletonList(".*"), 
sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP));
+        assertEquals(Collections.emptyList(), 
sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP));
+        assertEquals(Collections.emptyMap(), 
sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP));
 
         // verify topic creation group is instantiated correctly
         Map<String, TopicCreationGroup> groups = 
TopicCreationGroup.configuredGroups(sourceConfig);
         assertEquals(2, groups.size());
-        assertThat(groups.keySet(), hasItems(DEFAULT_TOPIC_CREATION_GROUP, 
FOO_GROUP));
+        assertEquals(new HashSet<>(Arrays.asList(DEFAULT_TOPIC_CREATION_GROUP, 
FOO_GROUP)), groups.keySet());
 
         // verify topic creation
         TopicCreation topicCreation = 
TopicCreation.newTopicCreation(workerConfig, groups);
@@ -383,7 +381,7 @@ public class TopicCreationTest {
         assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC));
         assertTrue(topicCreation.isTopicCreationRequired(BAR_TOPIC));
         assertEquals(1, topicCreation.topicGroups().size());
-        assertThat(topicCreation.topicGroups().keySet(), hasItems(FOO_GROUP));
+        assertEquals(Collections.singleton(FOO_GROUP), 
topicCreation.topicGroups().keySet());
         assertEquals(fooGroup, topicCreation.findFirstGroup(FOO_TOPIC));
         assertEquals(fooGroup, topicCreation.findFirstGroup(BAR_TOPIC));
         topicCreation.addTopic(FOO_TOPIC);
@@ -396,13 +394,13 @@ public class TopicCreationTest {
         assertEquals(FOO_TOPIC, fooTopicSpec.name());
         assertEquals(fooReplicas, fooTopicSpec.replicationFactor());
         assertEquals(partitions, fooTopicSpec.numPartitions());
-        assertThat(fooTopicSpec.configs(), is(topicProps));
+        assertEquals(topicProps, fooTopicSpec.configs());
 
         NewTopic barTopicSpec = 
topicCreation.findFirstGroup(BAR_TOPIC).newTopic(BAR_TOPIC);
         assertEquals(BAR_TOPIC, barTopicSpec.name());
         assertEquals(fooReplicas, barTopicSpec.replicationFactor());
         assertEquals(partitions, barTopicSpec.numPartitions());
-        assertThat(barTopicSpec.configs(), is(topicProps));
+        assertEquals(topicProps, barTopicSpec.configs());
     }
 
     @Test
@@ -433,14 +431,14 @@ public class TopicCreationTest {
         assertTrue(sourceConfig.usesTopicCreation());
         assertEquals(DEFAULT_REPLICATION_FACTOR, (short) 
sourceConfig.topicCreationReplicationFactor(DEFAULT_TOPIC_CREATION_GROUP));
         assertEquals(partitions, (int) 
sourceConfig.topicCreationPartitions(DEFAULT_TOPIC_CREATION_GROUP));
-        
assertThat(sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP), 
is(Collections.singletonList(".*")));
-        
assertThat(sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP), 
is(Collections.emptyList()));
-        
assertThat(sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP),
 is(Collections.emptyMap()));
+        assertEquals(Collections.singletonList(".*"), 
sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP));
+        assertEquals(Collections.emptyList(), 
sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP));
+        assertEquals(Collections.emptyMap(), 
sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP));
 
         // verify topic creation group is instantiated correctly
         Map<String, TopicCreationGroup> groups = 
TopicCreationGroup.configuredGroups(sourceConfig);
         assertEquals(3, groups.size());
-        assertThat(groups.keySet(), hasItems(DEFAULT_TOPIC_CREATION_GROUP, 
FOO_GROUP, BAR_GROUP));
+        assertEquals(new HashSet<>(Arrays.asList(DEFAULT_TOPIC_CREATION_GROUP, 
FOO_GROUP, BAR_GROUP)), groups.keySet());
 
         // verify topic creation
         TopicCreation topicCreation = 
TopicCreation.newTopicCreation(workerConfig, groups);
@@ -464,7 +462,7 @@ public class TopicCreationTest {
         assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC));
         assertTrue(topicCreation.isTopicCreationRequired(BAR_TOPIC));
         assertEquals(2, topicCreation.topicGroups().size());
-        assertThat(topicCreation.topicGroups().keySet(), hasItems(FOO_GROUP, 
BAR_GROUP));
+        assertEquals(new HashSet<>(Arrays.asList(FOO_GROUP, BAR_GROUP)), 
topicCreation.topicGroups().keySet());
         assertEquals(fooGroup, topicCreation.findFirstGroup(FOO_TOPIC));
         assertEquals(barGroup, topicCreation.findFirstGroup(BAR_TOPIC));
         topicCreation.addTopic(FOO_TOPIC);
@@ -478,19 +476,19 @@ public class TopicCreationTest {
         assertEquals(otherTopic, defaultTopicSpec.name());
         assertEquals(DEFAULT_REPLICATION_FACTOR, 
defaultTopicSpec.replicationFactor());
         assertEquals(partitions, defaultTopicSpec.numPartitions());
-        assertThat(defaultTopicSpec.configs(), is(Collections.emptyMap()));
+        assertEquals(Collections.emptyMap(), defaultTopicSpec.configs());
 
         NewTopic fooTopicSpec = 
topicCreation.findFirstGroup(FOO_TOPIC).newTopic(FOO_TOPIC);
         assertEquals(FOO_TOPIC, fooTopicSpec.name());
         assertEquals(fooReplicas, fooTopicSpec.replicationFactor());
         assertEquals(partitions, fooTopicSpec.numPartitions());
-        assertThat(fooTopicSpec.configs(), is(fooTopicProps));
+        assertEquals(fooTopicProps, fooTopicSpec.configs());
 
         NewTopic barTopicSpec = 
topicCreation.findFirstGroup(BAR_TOPIC).newTopic(BAR_TOPIC);
         assertEquals(BAR_TOPIC, barTopicSpec.name());
         assertEquals(DEFAULT_REPLICATION_FACTOR, 
barTopicSpec.replicationFactor());
         assertEquals(barPartitions, barTopicSpec.numPartitions());
-        assertThat(barTopicSpec.configs(), is(barTopicProps));
+        assertEquals(barTopicProps, barTopicSpec.configs());
     }
 
     @Test
@@ -510,9 +508,9 @@ public class TopicCreationTest {
 
         assertTrue(topicCreation.isTopicCreationEnabled());
         assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC));
-        assertThat(topicCreation.defaultTopicGroup(), 
is(groups.get(DEFAULT_TOPIC_CREATION_GROUP)));
+        assertEquals(groups.get(DEFAULT_TOPIC_CREATION_GROUP), 
topicCreation.defaultTopicGroup());
         assertEquals(2, topicCreation.topicGroups().size());
-        assertThat(topicCreation.topicGroups().keySet(), hasItems(FOO_GROUP, 
BAR_GROUP));
+        assertEquals(new HashSet<>(Arrays.asList(FOO_GROUP, BAR_GROUP)), 
topicCreation.topicGroups().keySet());
         assertEquals(topicCreation.defaultTopicGroup(), 
topicCreation.findFirstGroup(FOO_TOPIC));
         topicCreation.addTopic(FOO_TOPIC);
         assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC));
@@ -565,14 +563,14 @@ public class TopicCreationTest {
         assertTrue(sourceConfig.usesTopicCreation());
         assertEquals(DEFAULT_REPLICATION_FACTOR, (short) 
sourceConfig.topicCreationReplicationFactor(DEFAULT_TOPIC_CREATION_GROUP));
         assertEquals(partitions, (int) 
sourceConfig.topicCreationPartitions(DEFAULT_TOPIC_CREATION_GROUP));
-        
assertThat(sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP), 
is(Collections.singletonList(".*")));
-        
assertThat(sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP), 
is(Collections.emptyList()));
-        
assertThat(sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP),
 is(Collections.emptyMap()));
+        assertEquals(Collections.singletonList(".*"), 
sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP));
+        assertEquals(Collections.emptyList(), 
sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP));
+        assertEquals(Collections.emptyMap(), 
sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP));
 
         // verify topic creation group is instantiated correctly
         Map<String, TopicCreationGroup> groups = 
TopicCreationGroup.configuredGroups(sourceConfig);
         assertEquals(3, groups.size());
-        assertThat(groups.keySet(), hasItems(DEFAULT_TOPIC_CREATION_GROUP, 
FOO_GROUP, BAR_GROUP));
+        assertEquals(new HashSet<>(Arrays.asList(DEFAULT_TOPIC_CREATION_GROUP, 
FOO_GROUP, BAR_GROUP)), groups.keySet());
 
         // verify topic creation
         TopicCreation topicCreation = 
TopicCreation.newTopicCreation(workerConfig, groups);
@@ -596,7 +594,7 @@ public class TopicCreationTest {
         assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC));
         assertTrue(topicCreation.isTopicCreationRequired(BAR_TOPIC));
         assertEquals(2, topicCreation.topicGroups().size());
-        assertThat(topicCreation.topicGroups().keySet(), hasItems(FOO_GROUP, 
BAR_GROUP));
+        assertEquals(new HashSet<>(Arrays.asList(FOO_GROUP, BAR_GROUP)), 
topicCreation.topicGroups().keySet());
         assertEquals(fooGroup, topicCreation.findFirstGroup(FOO_TOPIC));
         assertEquals(barGroup, topicCreation.findFirstGroup(BAR_TOPIC));
         topicCreation.addTopic(FOO_TOPIC);
@@ -610,19 +608,19 @@ public class TopicCreationTest {
         assertEquals(otherTopic, defaultTopicSpec.name());
         assertEquals(DEFAULT_REPLICATION_FACTOR, 
defaultTopicSpec.replicationFactor());
         assertEquals(partitions, defaultTopicSpec.numPartitions());
-        assertThat(defaultTopicSpec.configs(), is(Collections.emptyMap()));
+        assertEquals(Collections.emptyMap(), defaultTopicSpec.configs());
 
         NewTopic fooTopicSpec = 
topicCreation.findFirstGroup(FOO_TOPIC).newTopic(FOO_TOPIC);
         assertEquals(FOO_TOPIC, fooTopicSpec.name());
         assertEquals(fooReplicas, fooTopicSpec.replicationFactor());
         assertEquals(partitions, fooTopicSpec.numPartitions());
-        assertThat(fooTopicSpec.configs(), is(fooTopicProps));
+        assertEquals(fooTopicProps, fooTopicSpec.configs());
 
         NewTopic barTopicSpec = 
topicCreation.findFirstGroup(BAR_TOPIC).newTopic(BAR_TOPIC);
         assertEquals(BAR_TOPIC, barTopicSpec.name());
         assertEquals(DEFAULT_REPLICATION_FACTOR, 
barTopicSpec.replicationFactor());
         assertEquals(barPartitions, barTopicSpec.numPartitions());
-        assertThat(barTopicSpec.configs(), is(barTopicProps));
+        assertEquals(barTopicProps, barTopicSpec.configs());
 
         List<TransformationStage<SourceRecord>> transformationStages = 
sourceConfig.transformationStages();
         assertEquals(2, transformationStages.size());
diff --git a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientTest.java 
b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientTest.java
index 03bf8155ca7..8c0a496d19f 100644
--- a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientTest.java
+++ b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientTest.java
@@ -71,9 +71,6 @@ import java.util.stream.Stream;
 import static java.util.Collections.singletonList;
 import static 
org.apache.kafka.raft.RaftClientTestContext.Builder.DEFAULT_ELECTION_TIMEOUT_MS;
 import static org.apache.kafka.test.TestUtils.assertFutureThrows;
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.hamcrest.Matchers.in;
-import static org.hamcrest.Matchers.is;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
@@ -806,7 +803,7 @@ public class KafkaRaftClientTest {
         context.pollUntilRequest();
 
         RaftRequest.Outbound fetchRequest = context.assertSentFetchRequest();
-        assertThat(fetchRequest.destination().id(), is(in(voters)));
+        assertTrue(voters.contains(fetchRequest.destination().id()));
         context.assertFetchRequestData(fetchRequest, 0, 0L, 0);
 
         context.deliverResponse(
@@ -1836,7 +1833,7 @@ public class KafkaRaftClientTest {
 
         context.pollUntilRequest();
         RaftRequest.Outbound fetchRequest = context.assertSentFetchRequest();
-        assertThat(fetchRequest.destination().id(), is(in(voters)));
+        assertTrue(voters.contains(fetchRequest.destination().id()));
         context.assertFetchRequestData(fetchRequest, 0, 0L, 0);
 
         context.deliverResponse(
diff --git 
a/server-common/src/test/java/org/apache/kafka/timeline/TimelineHashMapTest.java
 
b/server-common/src/test/java/org/apache/kafka/timeline/TimelineHashMapTest.java
index 799a7fc187c..ca907eb22aa 100644
--- 
a/server-common/src/test/java/org/apache/kafka/timeline/TimelineHashMapTest.java
+++ 
b/server-common/src/test/java/org/apache/kafka/timeline/TimelineHashMapTest.java
@@ -23,13 +23,12 @@ import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.Timeout;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.hamcrest.Matchers.containsInAnyOrder;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
@@ -65,8 +64,8 @@ public class TimelineHashMapTest {
         TimelineHashMap<Integer, String> map = new TimelineHashMap<>(registry, 
1);
         map.put(123, "abc");
         map.put(456, "def");
-        assertThat(iteratorToList(map.keySet().iterator()), 
containsInAnyOrder(123, 456));
-        assertThat(iteratorToList(map.values().iterator()), 
containsInAnyOrder("abc", "def"));
+        
assertTrue(iteratorToList(map.keySet().iterator()).containsAll(Arrays.asList(123,
 456)));
+        
assertTrue(iteratorToList(map.values().iterator()).containsAll(Arrays.asList("abc",
 "def")));
         assertTrue(map.containsValue("abc"));
         assertTrue(map.containsKey(456));
         assertFalse(map.isEmpty());
@@ -77,7 +76,7 @@ public class TimelineHashMapTest {
         snapshotValues.add(iter.next().getValue());
         snapshotValues.add(iter.next().getValue());
         assertFalse(iter.hasNext());
-        assertThat(snapshotValues, containsInAnyOrder("abc", "def"));
+        assertTrue(snapshotValues.containsAll(Arrays.asList("abc", "def")));
         assertFalse(map.isEmpty(2));
         assertTrue(map.isEmpty());
     }
diff --git a/server-common/src/test/java/org/apache/kafka/timeline/TimelineHashSetTest.java b/server-common/src/test/java/org/apache/kafka/timeline/TimelineHashSetTest.java
index 1890750f114..172462bef62 100644
--- a/server-common/src/test/java/org/apache/kafka/timeline/TimelineHashSetTest.java
+++ b/server-common/src/test/java/org/apache/kafka/timeline/TimelineHashSetTest.java
@@ -24,8 +24,6 @@ import org.junit.jupiter.api.Timeout;
 
 import java.util.Arrays;
 
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.hamcrest.Matchers.containsInAnyOrder;
 import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -65,10 +63,8 @@ public class TimelineHashSetTest {
         assertFalse(set.removeAll(Arrays.asList("d")));
         registry.getOrCreateSnapshot(2);
         assertTrue(set.removeAll(Arrays.asList("c")));
-        assertThat(TimelineHashMapTest.iteratorToList(set.iterator(2)),
-            containsInAnyOrder("a", "b", "c"));
-        assertThat(TimelineHashMapTest.iteratorToList(set.iterator()),
-            containsInAnyOrder("a", "b"));
+        assertTrue(TimelineHashMapTest.iteratorToList(set.iterator(2)).containsAll(Arrays.asList("a", "b", "c")));
+        assertTrue(TimelineHashMapTest.iteratorToList(set.iterator()).containsAll(Arrays.asList("a", "b")));
         assertEquals(2, set.size());
         assertEquals(3, set.size(2));
         set.clear();
@@ -101,14 +97,10 @@ public class TimelineHashSetTest {
         assertFalse(set.containsAll(Arrays.asList("abc", "def", "xyz")));
         assertTrue(set.removeAll(Arrays.asList("def", "ghi", "xyz")));
         registry.getOrCreateSnapshot(5);
-        assertThat(TimelineHashMapTest.iteratorToList(set.iterator(5)),
-            containsInAnyOrder("abc", "jkl"));
-        assertThat(TimelineHashMapTest.iteratorToList(set.iterator()),
-            containsInAnyOrder("abc", "jkl"));
+        assertTrue(TimelineHashMapTest.iteratorToList(set.iterator(5)).containsAll(Arrays.asList("abc", "jkl")));
+        assertTrue(TimelineHashMapTest.iteratorToList(set.iterator()).containsAll(Arrays.asList("abc", "jkl")));
         set.removeIf(e -> e.startsWith("a"));
-        assertThat(TimelineHashMapTest.iteratorToList(set.iterator()),
-            containsInAnyOrder("jkl"));
-        assertThat(TimelineHashMapTest.iteratorToList(set.iterator(5)),
-            containsInAnyOrder("abc", "jkl"));
+        assertTrue(TimelineHashMapTest.iteratorToList(set.iterator()).contains("jkl"));
+        assertTrue(TimelineHashMapTest.iteratorToList(set.iterator(5)).containsAll(Arrays.asList("abc", "jkl")));
     }
 }

Reply via email to