This is an automated email from the ASF dual-hosted git repository.
showuon pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git
The following commit(s) were added to refs/heads/trunk by this push:
new bf7cd675f8 MINOR: Remove duplicated test cases in MetadataVersionTest
(#12116)
bf7cd675f8 is described below
commit bf7cd675f8b35b806b1414c68479e69055df4a91
Author: dengziming <[email protected]>
AuthorDate: Wed May 4 11:10:39 2022 +0800
MINOR: Remove duplicated test cases in MetadataVersionTest (#12116)
These tests belong to ApiVersionsResponseTest, and were accidentally copied
to MetadataVersionTest when working on #12072.
Reviewer: Luke Chen <[email protected]>
---
checkstyle/suppressions.xml | 2 -
.../common/requests/ApiVersionsResponseTest.java | 2 +-
.../main/scala/kafka/server/BrokerFeatures.scala | 2 +-
.../kafka/server/common/MetadataVersionTest.java | 156 ++++++---------------
4 files changed, 42 insertions(+), 120 deletions(-)
diff --git a/checkstyle/suppressions.xml b/checkstyle/suppressions.xml
index 99493bf779..0cc6c831d9 100644
--- a/checkstyle/suppressions.xml
+++ b/checkstyle/suppressions.xml
@@ -303,8 +303,6 @@
files="(MetadataImage).java"/>
<suppress checks="ImportControl"
files="ApiVersionsResponse.java"/>
- <suppress checks="AvoidStarImport"
- files="MetadataVersionTest.java"/>
<!-- Storage -->
<suppress checks="(CyclomaticComplexity|ParameterNumber)"
diff --git
a/clients/src/test/java/org/apache/kafka/common/requests/ApiVersionsResponseTest.java
b/clients/src/test/java/org/apache/kafka/common/requests/ApiVersionsResponseTest.java
index 15f3cd240a..c3fa6b892e 100644
---
a/clients/src/test/java/org/apache/kafka/common/requests/ApiVersionsResponseTest.java
+++
b/clients/src/test/java/org/apache/kafka/common/requests/ApiVersionsResponseTest.java
@@ -168,7 +168,7 @@ public class ApiVersionsResponseTest {
null,
ListenerType.ZK_BROKER
);
- assertEquals(new HashSet<ApiKeys>(ApiKeys.zkBrokerApis()),
apiKeysInResponse(response));
+ assertEquals(new HashSet<>(ApiKeys.zkBrokerApis()),
apiKeysInResponse(response));
assertEquals(AbstractResponse.DEFAULT_THROTTLE_TIME,
response.throttleTimeMs());
assertTrue(response.data().supportedFeatures().isEmpty());
assertTrue(response.data().finalizedFeatures().isEmpty());
diff --git a/core/src/main/scala/kafka/server/BrokerFeatures.scala
b/core/src/main/scala/kafka/server/BrokerFeatures.scala
index 4f0fe37989..9511172c7e 100644
--- a/core/src/main/scala/kafka/server/BrokerFeatures.scala
+++ b/core/src/main/scala/kafka/server/BrokerFeatures.scala
@@ -38,7 +38,7 @@ class BrokerFeatures private (@volatile var
supportedFeatures: Features[Supporte
}
/**
- * Returns the default finalized features that a new Kafka cluster with IBP
config >= KAFKA_2_7_IV0
+ * Returns the default finalized features that a new Kafka cluster with IBP
config >= IBP_2_7_IV0
* needs to be bootstrapped with.
*/
def defaultFinalizedFeatures: Features[FinalizedVersionRange] = {
diff --git
a/server-common/src/test/java/org/apache/kafka/server/common/MetadataVersionTest.java
b/server-common/src/test/java/org/apache/kafka/server/common/MetadataVersionTest.java
index 7ad8754b75..2293572b28 100644
---
a/server-common/src/test/java/org/apache/kafka/server/common/MetadataVersionTest.java
+++
b/server-common/src/test/java/org/apache/kafka/server/common/MetadataVersionTest.java
@@ -17,29 +17,52 @@
package org.apache.kafka.server.common;
-import java.util.Arrays;
-import java.util.HashSet;
-import org.apache.kafka.common.feature.Features;
-import org.apache.kafka.common.feature.FinalizedVersionRange;
-import org.apache.kafka.common.feature.SupportedVersionRange;
-import org.apache.kafka.common.message.ApiMessageType.ListenerType;
-import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion;
-import
org.apache.kafka.common.message.ApiVersionsResponseData.FinalizedFeatureKey;
-import
org.apache.kafka.common.message.ApiVersionsResponseData.SupportedFeatureKey;
-import org.apache.kafka.common.protocol.ApiKeys;
-import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.RecordVersion;
-import org.apache.kafka.common.requests.AbstractResponse;
-import org.apache.kafka.common.requests.ApiVersionsResponse;
-import org.apache.kafka.common.utils.Utils;
+import org.junit.jupiter.api.Test;
+
+import java.util.Arrays;
-import static org.apache.kafka.server.common.MetadataVersion.*;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_0_10_0_IV0;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_0_10_0_IV1;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_0_10_1_IV0;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_0_10_1_IV1;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_0_10_1_IV2;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_0_10_2_IV0;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_0_11_0_IV0;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_0_11_0_IV1;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_0_11_0_IV2;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_0_8_0;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_0_8_1;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_0_8_2;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_0_9_0;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_1_0_IV0;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_1_1_IV0;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_2_0_IV0;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_2_0_IV1;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_2_1_IV0;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_2_1_IV1;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_2_1_IV2;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_2_2_IV0;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_2_2_IV1;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_2_3_IV0;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_2_3_IV1;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_2_4_IV0;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_2_4_IV1;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_2_5_IV0;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_2_6_IV0;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_2_7_IV0;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_2_7_IV1;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_2_7_IV2;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_2_8_IV0;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_2_8_IV1;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_3_0_IV0;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_3_0_IV1;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_3_1_IV0;
+import static org.apache.kafka.server.common.MetadataVersion.IBP_3_2_IV0;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import org.junit.jupiter.api.Test;
class MetadataVersionTest {
@@ -241,103 +264,4 @@ class MetadataVersionTest {
assertEquals("3.2-IV0", IBP_3_2_IV0.version());
}
- @Test
- public void shouldCreateApiResponseOnlyWithKeysSupportedByMagicValue() {
- ApiVersionsResponse response =
ApiVersionsResponse.createApiVersionsResponse(
- 10,
- RecordVersion.V1,
- Features.emptySupportedFeatures(),
- Features.emptyFinalizedFeatures(),
- ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH,
- null,
- ListenerType.ZK_BROKER
- );
- verifyApiKeysForMagic(response, RecordBatch.MAGIC_VALUE_V1);
- assertEquals(10, response.throttleTimeMs());
- assertTrue(response.data().supportedFeatures().isEmpty());
- assertTrue(response.data().finalizedFeatures().isEmpty());
- assertEquals(ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH,
response.data().finalizedFeaturesEpoch());
- }
-
- @Test
- public void
shouldReturnFeatureKeysWhenMagicIsCurrentValueAndThrottleMsIsDefaultThrottle() {
- ApiVersionsResponse response =
ApiVersionsResponse.createApiVersionsResponse(
- 10,
- RecordVersion.V1,
- Features.supportedFeatures(
- Utils.mkMap(Utils.mkEntry("feature", new
SupportedVersionRange((short) 1, (short) 4)))),
- Features.finalizedFeatures(
- Utils.mkMap(Utils.mkEntry("feature", new
FinalizedVersionRange((short) 2, (short) 3)))),
- 10L,
- null,
- ListenerType.ZK_BROKER
- );
-
- verifyApiKeysForMagic(response, RecordBatch.MAGIC_VALUE_V1);
- assertEquals(10, response.throttleTimeMs());
- assertEquals(1, response.data().supportedFeatures().size());
- SupportedFeatureKey sKey =
response.data().supportedFeatures().find("feature");
- assertNotNull(sKey);
- assertEquals(1, sKey.minVersion());
- assertEquals(4, sKey.maxVersion());
- assertEquals(1, response.data().finalizedFeatures().size());
- FinalizedFeatureKey fKey =
response.data().finalizedFeatures().find("feature");
- assertNotNull(fKey);
- assertEquals(2, fKey.minVersionLevel());
- assertEquals(3, fKey.maxVersionLevel());
- assertEquals(10, response.data().finalizedFeaturesEpoch());
- }
-
- private void verifyApiKeysForMagic(ApiVersionsResponse response, Byte
maxMagic) {
- for (ApiVersion version : response.data().apiKeys()) {
-
assertTrue(ApiKeys.forId(version.apiKey()).minRequiredInterBrokerMagic <=
maxMagic);
- }
- }
-
- @Test
- public void
shouldReturnAllKeysWhenMagicIsCurrentValueAndThrottleMsIsDefaultThrottle() {
- ApiVersionsResponse response =
ApiVersionsResponse.createApiVersionsResponse(
- AbstractResponse.DEFAULT_THROTTLE_TIME,
- RecordVersion.current(),
- Features.emptySupportedFeatures(),
- Features.emptyFinalizedFeatures(),
- ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH,
- null,
- ListenerType.ZK_BROKER
- );
- assertEquals(new HashSet<ApiKeys>(ApiKeys.zkBrokerApis()),
apiKeysInResponse(response));
- assertEquals(AbstractResponse.DEFAULT_THROTTLE_TIME,
response.throttleTimeMs());
- assertTrue(response.data().supportedFeatures().isEmpty());
- assertTrue(response.data().finalizedFeatures().isEmpty());
- assertEquals(ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH,
response.data().finalizedFeaturesEpoch());
- }
-
- @Test
- public void testMetadataQuorumApisAreDisabled() {
- ApiVersionsResponse response =
ApiVersionsResponse.createApiVersionsResponse(
- AbstractResponse.DEFAULT_THROTTLE_TIME,
- RecordVersion.current(),
- Features.emptySupportedFeatures(),
- Features.emptyFinalizedFeatures(),
- ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH,
- null,
- ListenerType.ZK_BROKER
- );
-
- // Ensure that APIs needed for the KRaft mode are not exposed through
ApiVersions until we are ready for them
- HashSet<ApiKeys> exposedApis = apiKeysInResponse(response);
- assertFalse(exposedApis.contains(ApiKeys.ENVELOPE));
- assertFalse(exposedApis.contains(ApiKeys.VOTE));
- assertFalse(exposedApis.contains(ApiKeys.BEGIN_QUORUM_EPOCH));
- assertFalse(exposedApis.contains(ApiKeys.END_QUORUM_EPOCH));
- assertFalse(exposedApis.contains(ApiKeys.DESCRIBE_QUORUM));
- }
-
- private HashSet<ApiKeys> apiKeysInResponse(ApiVersionsResponse
apiVersions) {
- HashSet<ApiKeys> apiKeys = new HashSet<>();
- for (ApiVersion version : apiVersions.data().apiKeys()) {
- apiKeys.add(ApiKeys.forId(version.apiKey()));
- }
- return apiKeys;
- }
}