[kafka-site] branch asf-site updated: MINOR; Update documentation for 3.3.1 (#451)

2022-09-29 Thread jsancio
This is an automated email from the ASF dual-hosted git repository.

jsancio pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new f6d8c2aa MINOR; Update documentation for 3.3.1 (#451)
f6d8c2aa is described below

commit f6d8c2aa0427d6013556a9899355035e9001592e
Author: José Armando García Sancio 
AuthorDate: Thu Sep 29 16:44:32 2022 -0700

MINOR; Update documentation for 3.3.1 (#451)

Reviewers: Ismael Juma 
---
 33/generated/connect_rest.yaml | 2 +-
 33/js/templateData.js  | 2 +-
 33/upgrade.html| 6 +++---
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/33/generated/connect_rest.yaml b/33/generated/connect_rest.yaml
index 834edc63..d82982de 100644
--- a/33/generated/connect_rest.yaml
+++ b/33/generated/connect_rest.yaml
@@ -8,7 +8,7 @@ info:
 name: Apache 2.0
 url: https://www.apache.org/licenses/LICENSE-2.0.html
   title: Kafka Connect REST API
-  version: 3.3.0-SNAPSHOT
+  version: 3.3.1
 paths:
   /:
 get:
diff --git a/33/js/templateData.js b/33/js/templateData.js
index 6b773468..dd5e02d9 100644
--- a/33/js/templateData.js
+++ b/33/js/templateData.js
@@ -19,6 +19,6 @@ limitations under the License.
 var context={
 "version": "33",
 "dotVersion": "3.3",
-"fullDotVersion": "3.3.0",
+"fullDotVersion": "3.3.1",
 "scalaVersion": "2.13"
 };
diff --git a/33/upgrade.html b/33/upgrade.html
index 29192698..8fc5c177 100644
--- a/33/upgrade.html
+++ b/33/upgrade.html
@@ -19,7 +19,7 @@
 
 

[kafka] branch trunk updated: KAFKA-14248; Fix flaky test PlaintextAdminIntegrationTest.testCreateTopicsReturnsConfigs (#12669)

2022-09-29 Thread jgus
This is an automated email from the ASF dual-hosted git repository.

jgus pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
 new bc95aa21169 KAFKA-14248; Fix flaky test PlaintextAdminIntegrationTest.testCreateTopicsReturnsConfigs (#12669)
bc95aa21169 is described below

commit bc95aa21169b8b5b9b8a4b609e88cc125157234b
Author: Divij Vaidya 
AuthorDate: Fri Sep 30 01:24:03 2022 +0200

KAFKA-14248; Fix flaky test PlaintextAdminIntegrationTest.testCreateTopicsReturnsConfigs (#12669)

The test fails intermittently because we do not wait for the altered config (LogRetentionTimeMillisProp) to propagate to all brokers before proceeding with the test.

This PR makes the following changes:
1. Wait for the altered configuration to propagate to all brokers (a minimal sketch of this polling pattern follows below).
2. Use the existing `killBroker` utility method, which waits for shutdown using `awaitShutdown`.
3. Improve code readability by using `TestUtils.incrementalAlterConfigs` to send alter config requests.
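
A minimal sketch of that polling pattern (plain Java; `waitUntilTrue` and the `BrokerHandle.retentionMs()` accessor here are hypothetical stand-ins for the Kafka test utilities, not the project's actual API):

import java.util.List;
import java.util.function.BooleanSupplier;

public final class ConfigPropagationWait {
    // Polls `condition` until it holds or `timeoutMs` elapses -- the same
    // wait-for-propagation idea the fix applies in the real test.
    static void waitUntilTrue(BooleanSupplier condition, String message, long timeoutMs)
            throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!condition.getAsBoolean()) {
            if (System.currentTimeMillis() > deadline) {
                throw new AssertionError(message);
            }
            Thread.sleep(100L); // back off briefly between polls
        }
    }

    // Hypothetical usage: block until every broker reports the altered value.
    static void awaitRetentionMs(List<BrokerHandle> brokers, long expectedMs)
            throws InterruptedException {
        waitUntilTrue(
            () -> brokers.stream().allMatch(b -> b.retentionMs() == expectedMs),
            "Altered retention.ms did not propagate to all brokers",
            15_000L);
    }

    interface BrokerHandle {
        long retentionMs(); // stand-in for reading a broker's current config
    }
}

Only once the condition holds on every broker does the test proceed, which is what removes the flakiness described above.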

Reviewers: Jason Gustafson 
---
 .../kafka/api/PlaintextAdminIntegrationTest.scala  | 82 ++
 1 file changed, 38 insertions(+), 44 deletions(-)

diff --git a/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala
index 7121f98bb9c..1656af08bc1 100644
--- a/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala
+++ b/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala
@@ -33,7 +33,6 @@ import kafka.server.{Defaults, DynamicConfig, KafkaConfig}
 import kafka.utils.TestUtils._
 import kafka.utils.{Log4jController, TestInfoUtils, TestUtils}
 import org.apache.kafka.clients.HostResolver
-import org.apache.kafka.clients.admin.AlterConfigOp.OpType
 import org.apache.kafka.clients.admin.ConfigEntry.ConfigSource
 import org.apache.kafka.clients.admin._
 import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
@@ -159,22 +158,6 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest {
 waitForTopics(client, List(), topics)
   }
 
-  @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumName)
-  @ValueSource(strings = Array("zk")) // KRaft mode will be supported in KAFKA-13910
-  def testMetadataRefresh(quorum: String): Unit = {
-    client = Admin.create(createConfig)
-    val topics = Seq("mytopic")
-    val newTopics = Seq(new NewTopic("mytopic", 3, 3.toShort))
-    client.createTopics(newTopics.asJava).all.get()
-    waitForTopics(client, expectedPresent = topics, expectedMissing = List())
-
-    val controller = brokers.find(_.config.brokerId == brokers.flatMap(_.metadataCache.getControllerId).head).get
-    controller.shutdown()
-    controller.awaitShutdown()
-    val topicDesc = client.describeTopics(topics.asJava).allTopicNames.get()
-    assertEquals(topics.toSet, topicDesc.keySet.asScala)
-  }
-
   /**
 * describe should not auto create topics
 */
@@ -821,10 +804,10 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest {
   @ValueSource(strings = Array("zk", "kraft"))
   def testReplicaCanFetchFromLogStartOffsetAfterDeleteRecords(quorum: String): Unit = {
     val leaders = createTopic(topic, replicationFactor = brokerCount)
-    val followerIndex = if (leaders(0) != brokers(0).config.brokerId) 0 else 1
+    val followerIndex = if (leaders(0) != brokers.head.config.brokerId) 0 else 1
 
     def waitForFollowerLog(expectedStartOffset: Long, expectedEndOffset: Long): Unit = {
-      TestUtils.waitUntilTrue(() => brokers(followerIndex).replicaManager.localLog(topicPartition) != None,
+      TestUtils.waitUntilTrue(() => brokers(followerIndex).replicaManager.localLog(topicPartition).isDefined,
        "Expected follower to create replica for partition")
 
       // wait until the follower discovers that log start offset moved beyond its HW
@@ -862,6 +845,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest {
     val result1 = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(117L)).asJava)
     result1.all().get()
     restartDeadBrokers()
+    TestUtils.waitForBrokersInIsr(client, topicPartition, Set(followerIndex))
     waitForFollowerLog(expectedStartOffset=117L, expectedEndOffset=200L)
   }
 
@@ -1522,7 +1506,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest {
     // Now change the preferred leader to 1
     changePreferredLeader(prefer1)
     // but shut it down...
-    brokers(1).shutdown()
+    killBroker(1)
     TestUtils.waitForBrokersOutOfIsr(client, Set(partition1, partition2), Set(1))
 
     def assertPreferredLeaderNotAvailable(
@@ -1576,9 +1560,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest {

[kafka-site] branch minor-3-3-1 updated (8a95f342 -> f6d1d230)

2022-09-29 Thread jsancio
This is an automated email from the ASF dual-hosted git repository.

jsancio pushed a change to branch minor-3-3-1
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


from 8a95f342 MINOR; Fix the upgrade html
 add f6d1d230 MINOR; Fix versions in the upgrade doc

No new revisions were added by this update.

Summary of changes:
 33/upgrade.html | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)



[kafka-site] branch minor-3-3-1 updated (4c19d3c8 -> 8a95f342)

2022-09-29 Thread jsancio
This is an automated email from the ASF dual-hosted git repository.

jsancio pushed a change to branch minor-3-3-1
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


from 4c19d3c8 MINOR; Update documentation for 3.3.1
 add 8a95f342 MINOR; Fix the upgrade html

No new revisions were added by this update.

Summary of changes:
 33/upgrade.html | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)



[kafka] branch trunk updated: MINOR; Add missing code end tag (#12702)

2022-09-29 Thread jsancio
This is an automated email from the ASF dual-hosted git repository.

jsancio pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 281e178352f MINOR; Add missing code end tag (#12702)
281e178352f is described below

commit 281e178352f0f48a9a68c1de2ede60c7dacf3252
Author: José Armando García Sancio 
AuthorDate: Thu Sep 29 15:09:00 2022 -0700

MINOR; Add missing code end tag (#12702)

Reviewers: Ismael Juma 
---
 docs/ops.html | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs/ops.html b/docs/ops.html
index 9ce051316f4..470fd7e0d92 100644
--- a/docs/ops.html
+++ b/docs/ops.html
@@ -1388,7 +1388,7 @@ $ bin/kafka-acls.sh \
 
 
bin/kafka-storage.sh format --cluster-id uuid --config server_properties
 
-  It is possible for the bin/kafka-storage.sh format command above to fail with a message like Log directory ... is already formatted. This can happen when combined mode is used and only the metadata log directory was lost but not the others. In that case, and only in that case, can you run the kafka-storage.sh format command with the --ignore-formatted option.
+  It is possible for the bin/kafka-storage.sh format command above to fail with a message like Log directory ... is already formatted. This can happen when combined mode is used and only the metadata log directory was lost but not the others. In that case, and only in that case, can you run the kafka-storage.sh format command with the --ignore-formatted option.
 
   Start the KRaft controller after formatting the log directories.
 
@@ -3408,7 +3408,7 @@ for built-in state stores, currently we have:
 
   Process Roles
 
-  In KRaft mode each Kafka server can be configured as a controller, a broker, or both using the process.roles property. This property can have the following values:
+  In KRaft mode each Kafka server can be configured as a controller, a broker, or both using the process.roles property. This property can have the following values:
 
   
 If process.roles is set to broker, the server acts as a broker.
@@ -3438,7 +3438,7 @@ node.id=1
 listeners=CONTROLLER://controller1.example.com:9093
 controller.quorum.voters=1@controller1.example.com:9093,2@controller2.example.com:9093,3@controller3.example.com:9093
 
-  Every broker and controller must set the controller.quorum.voters property. The node ID supplied in the controller.quorum.voters property must match the corresponding id on the controller servers. For example, on controller1, node.id must be set to 1, and so forth. Each node ID must be unique across all the servers in a particular cluster. No two servers can have the same node ID regardless of their process.roles values.
+  Every broker and controller must set the controller.quorum.voters property. The node ID supplied in the controller.quorum.voters property must match the corresponding id on the controller servers. For example, on controller1, node.id must be set to 1, and so forth. Each node ID must be unique across all the servers in a particular cluster. No two servers can have the same node ID regardless of their process.roles values.
 
   Storage Tool
   
@@ -3474,7 +3474,7 @@ CurrentObservers:   [0,1,2]
 
   Metadata Shell
 
-  The kafka-metadata-shell tool can be used to interactively inspect the state of the cluster metadata partition:
+  The kafka-metadata-shell tool can be used to interactively inspect the state of the cluster metadata partition:
 
   
bin/kafka-metadata-shell.sh --snapshot metadata_log_dir/__cluster_metadata-0/.log



[kafka] 02/02: MINOR; Add missing code end tag (#12702)

2022-09-29 Thread jsancio
This is an automated email from the ASF dual-hosted git repository.

jsancio pushed a commit to branch 3.3
in repository https://gitbox.apache.org/repos/asf/kafka.git

commit cdb25e10dcc45cf2fbd095dcba0a7bfe22a9d1cc
Author: José Armando García Sancio 
AuthorDate: Thu Sep 29 15:09:00 2022 -0700

MINOR; Add missing code end tag (#12702)

Reviewers: Ismael Juma 
---
 docs/ops.html | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs/ops.html b/docs/ops.html
index 9ce051316f4..470fd7e0d92 100644
--- a/docs/ops.html
+++ b/docs/ops.html
@@ -1388,7 +1388,7 @@ $ bin/kafka-acls.sh \
 
 
bin/kafka-storage.sh format --cluster-id uuid --config server_properties
 
-  It is possible for the bin/kafka-storage.sh format command above to fail with a message like Log directory ... is already formatted. This can happen when combined mode is used and only the metadata log directory was lost but not the others. In that case, and only in that case, can you run the kafka-storage.sh format command with the --ignore-formatted option.
+  It is possible for the bin/kafka-storage.sh format command above to fail with a message like Log directory ... is already formatted. This can happen when combined mode is used and only the metadata log directory was lost but not the others. In that case, and only in that case, can you run the kafka-storage.sh format command with the --ignore-formatted option.
 
   Start the KRaft controller after formatting the log directories.
 
@@ -3408,7 +3408,7 @@ for built-in state stores, currently we have:
 
   Process Roles
 
-  In KRaft mode each Kafka server can be configured as a controller, a broker, or both using the process.roles property. This property can have the following values:
+  In KRaft mode each Kafka server can be configured as a controller, a broker, or both using the process.roles property. This property can have the following values:
 
   
 If process.roles is set to broker, the server acts as a broker.
@@ -3438,7 +3438,7 @@ node.id=1
 listeners=CONTROLLER://controller1.example.com:9093
 controller.quorum.voters=1@controller1.example.com:9093,2@controller2.example.com:9093,3@controller3.example.com:9093
 
-  Every broker and controller must set the controller.quorum.voters property. The node ID supplied in the controller.quorum.voters property must match the corresponding id on the controller servers. For example, on controller1, node.id must be set to 1, and so forth. Each node ID must be unique across all the servers in a particular cluster. No two servers can have the same node ID regardless of their process.roles values.
+  Every broker and controller must set the controller.quorum.voters property. The node ID supplied in the controller.quorum.voters property must match the corresponding id on the controller servers. For example, on controller1, node.id must be set to 1, and so forth. Each node ID must be unique across all the servers in a particular cluster. No two servers can have the same node ID regardless of their process.roles values.
 
   Storage Tool
   
@@ -3474,7 +3474,7 @@ CurrentObservers:   [0,1,2]
 
   Metadata Shell
 
-  The kafka-metadata-shell tool can be used to interactively inspect the state of the cluster metadata partition:
+  The kafka-metadata-shell tool can be used to interactively inspect the state of the cluster metadata partition:
 
   
bin/kafka-metadata-shell.sh --snapshot metadata_log_dir/__cluster_metadata-0/.log



[kafka] 01/02: MINOR; Update upgrade documentation for 3.3.1 (#12701)

2022-09-29 Thread jsancio
This is an automated email from the ASF dual-hosted git repository.

jsancio pushed a commit to branch 3.3
in repository https://gitbox.apache.org/repos/asf/kafka.git

commit a4f72c47f1a89bde659925e667e9800cca1a0912
Author: José Armando García Sancio 
AuthorDate: Thu Sep 29 14:56:32 2022 -0700

MINOR; Update upgrade documentation for 3.3.1 (#12701)

Reviewers: David Arthur 
---
 docs/upgrade.html | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs/upgrade.html b/docs/upgrade.html
index 29192698aba..5d368f3457b 100644
--- a/docs/upgrade.html
+++ b/docs/upgrade.html
@@ -19,7 +19,7 @@
 
 

[kafka] branch 3.3 updated (1780f2660e4 -> cdb25e10dcc)

2022-09-29 Thread jsancio
This is an automated email from the ASF dual-hosted git repository.

jsancio pushed a change to branch 3.3
in repository https://gitbox.apache.org/repos/asf/kafka.git


from 1780f2660e4 KAFKA-14265: Prefix ACLs may shadow other prefix ACLs
 new a4f72c47f1a MINOR; Update upgrade documentation for 3.3.1 (#12701)
 new cdb25e10dcc MINOR; Add missing code end tag (#12702)

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 docs/ops.html | 8 
 docs/upgrade.html | 8 
 2 files changed, 8 insertions(+), 8 deletions(-)



[kafka] branch trunk updated: MINOR; Update upgrade documentation for 3.3.1 (#12701)

2022-09-29 Thread jsancio
This is an automated email from the ASF dual-hosted git repository.

jsancio pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
 new bee7ec6f263 MINOR; Update upgrade documentation for 3.3.1 (#12701)
bee7ec6f263 is described below

commit bee7ec6f2639c72bf95e79674f74c42e210ea4bb
Author: José Armando García Sancio 
AuthorDate: Thu Sep 29 14:56:32 2022 -0700

MINOR; Update upgrade documentation for 3.3.1 (#12701)

Reviewers: David Arthur 
---
 docs/upgrade.html | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs/upgrade.html b/docs/upgrade.html
index eca24948d9f..d96cddbdbcb 100644
--- a/docs/upgrade.html
+++ b/docs/upgrade.html
@@ -19,7 +19,7 @@
 
 

[kafka-site] branch minor-3-3-1 updated (cccb73f4 -> 4c19d3c8)

2022-09-29 Thread jsancio
This is an automated email from the ASF dual-hosted git repository.

jsancio pushed a change to branch minor-3-3-1
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


 discard cccb73f4 MINOR; Update documentation for 3.3.1
 add 4c19d3c8 MINOR; Update documentation for 3.3.1

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (cccb73f4)
\
 N -- N -- N   refs/heads/minor-3-3-1 (4c19d3c8)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

No new revisions were added by this update.

Summary of changes:
 33/generated/admin_client_config.html | 4 ++--
 33/generated/connect_config.html  | 4 ++--
 33/generated/connect_metrics.html | 4 ++--
 33/generated/consumer_config.html | 4 ++--
 33/generated/kafka_config.html| 4 ++--
 33/generated/producer_config.html | 4 ++--
 33/generated/streams_config.html  | 2 +-
 7 files changed, 13 insertions(+), 13 deletions(-)



[kafka-site] branch asf-site updated: fixing apache.org link (#453)

2022-09-29 Thread junrao
This is an automated email from the ASF dual-hosted git repository.

junrao pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 02e96c9c fixing apache.org link (#453)
02e96c9c is described below

commit 02e96c9c72dfcf9fd6ab1aabfcfb34404469706c
Author: scott-confluent <66280178+scott-conflu...@users.noreply.github.com>
AuthorDate: Thu Sep 29 14:23:36 2022 -0700

fixing apache.org link (#453)
---
 includes/_top.htm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/includes/_top.htm b/includes/_top.htm
index 419f2a31..65f87308 100644
--- a/includes/_top.htm
+++ b/includes/_top.htm
@@ -181,7 +181,7 @@
 
 
 
-   https://www.apache.org/foundation/sponsorship.html" target="_blank">
+   https://www.apache.org/" target="_blank">
 
 Apache.org
 




[kafka-site] branch asf-site updated: Add required ASF nav items (#452)

2022-09-29 Thread junrao
This is an automated email from the ASF dual-hosted git repository.

junrao pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new a2d78eca Add required ASF nav items (#452)
a2d78eca is described below

commit a2d78eca04b94a9d509fa7ba9f34af81a2a712b9
Author: scott-confluent <66280178+scott-conflu...@users.noreply.github.com>
AuthorDate: Thu Sep 29 14:20:40 2022 -0700

Add required ASF nav items (#452)
---
 includes/_top.htm | 37 +
 1 file changed, 37 insertions(+)

diff --git a/includes/_top.htm b/includes/_top.htm
index f89e13ed..419f2a31 100644
--- a/includes/_top.htm
+++ b/includes/_top.htm
@@ -150,6 +150,43 @@
 
 
 
+   
+   
+   Apache
+   
+   
+   
+   https://www.apache.org/licenses/" target="_blank">
+   License
+   
+   
+   
+   https://www.apache.org/foundation/sponsorship.html" target="_blank">
+   Donate
+   
+   
+   
+   https://www.apache.org/foundation/thanks.html" target="_blank">
+   Sponsors
+   
+   
+   
+   https://www.apache.org/security/" target="_blank">
+   Security
+   
+   
+   
+   https://privacy.apache.org/policies/privacy-policy-public.html" target="_blank">
+   Privacy
+   
+   
+   
+   https://www.apache.org/foundation/sponsorship.html" target="_blank">
+   Apache.org
+   
+   
+   
+   


Download Kafka



[kafka-site] branch minor-3-3-1 created (now cccb73f4)

2022-09-29 Thread jsancio
This is an automated email from the ASF dual-hosted git repository.

jsancio pushed a change to branch minor-3-3-1
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


  at cccb73f4 MINOR; Update documentation for 3.3.1

This branch includes the following new commits:

 new cccb73f4 MINOR; Update documentation for 3.3.1

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.




[kafka-site] 01/01: MINOR; Update documentation for 3.3.1

2022-09-29 Thread jsancio
This is an automated email from the ASF dual-hosted git repository.

jsancio pushed a commit to branch minor-3-3-1
in repository https://gitbox.apache.org/repos/asf/kafka-site.git

commit cccb73f41e6ce8a3ef1c4b16afbfb5248b388e88
Author: José Armando García Sancio 
AuthorDate: Thu Sep 29 14:02:18 2022 -0700

MINOR; Update documentation for 3.3.1
---
 33/generated/admin_client_config.html | 4 ++--
 33/generated/connect_config.html  | 4 ++--
 33/generated/connect_metrics.html | 4 ++--
 33/generated/connect_rest.yaml| 2 +-
 33/generated/consumer_config.html | 4 ++--
 33/generated/kafka_config.html| 4 ++--
 33/generated/producer_config.html | 4 ++--
 33/generated/streams_config.html  | 2 +-
 33/js/templateData.js | 2 +-
 9 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/33/generated/admin_client_config.html b/33/generated/admin_client_config.html
index d4114538..1b0d02f4 100644
--- a/33/generated/admin_client_config.html
+++ b/33/generated/admin_client_config.html
@@ -274,7 +274,7 @@
 The list of protocols enabled for SSL connections. The default is 
'TLSv1.2,TLSv1.3' when running with Java 11 or newer, 'TLSv1.2' otherwise. With 
the default value for Java 11, clients and servers will prefer TLSv1.3 if both 
support it and fallback to TLSv1.2 otherwise (assuming both support at least 
TLSv1.2). This default should be fine for most cases. Also see the config 
documentation for `ssl.protocol`.
 
 Type:list
-Default:TLSv1.2,TLSv1.3
+Default:TLSv1.2
 Valid Values:
 Importance:medium
 
@@ -294,7 +294,7 @@
 The SSL protocol used to generate the SSLContext. The default is 'TLSv1.3' 
when running with Java 11 or newer, 'TLSv1.2' otherwise. This value should be 
fine for most use cases. Allowed values in recent JVMs are 'TLSv1.2' and 
'TLSv1.3'. 'TLS', 'TLSv1.1', 'SSL', 'SSLv2' and 'SSLv3' may be supported in 
older JVMs, but their usage is discouraged due to known security 
vulnerabilities. With the default value for this config and 
'ssl.enabled.protocols', clients will downgrade to 'TLSv1.2' i [...]
 
 Type:string
-Default:TLSv1.3
+Default:TLSv1.2
 Valid Values:
 Importance:medium
 
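The -/+ pairs above reflect the defaults these 3.3.1 docs were generated with (presumably on a pre-Java-11 JVM, hence TLSv1.2). A client that should not depend on JVM-specific defaults can pin these settings explicitly; a minimal sketch, with a placeholder bootstrap address:

import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;

public final class PinnedTlsAdminClient {
    public static void main(String[] args) {
        Properties props = new Properties();
        // broker1.example.com:9093 is a placeholder bootstrap address
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1.example.com:9093");
        props.put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, "SSL");
        // Pin the protocols described above rather than relying on the JVM defaults
        props.put("ssl.protocol", "TLSv1.3");
        props.put("ssl.enabled.protocols", "TLSv1.2,TLSv1.3");
        try (Admin admin = Admin.create(props)) {
            admin.describeCluster(); // use the client as usual
        }
    }
}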
diff --git a/33/generated/connect_config.html b/33/generated/connect_config.html
index 09625a60..7c1e7acb 100644
--- a/33/generated/connect_config.html
+++ b/33/generated/connect_config.html
@@ -344,7 +344,7 @@
 The list of protocols enabled for SSL connections. The default is 
'TLSv1.2,TLSv1.3' when running with Java 11 or newer, 'TLSv1.2' otherwise. With 
the default value for Java 11, clients and servers will prefer TLSv1.3 if both 
support it and fallback to TLSv1.2 otherwise (assuming both support at least 
TLSv1.2). This default should be fine for most cases. Also see the config 
documentation for `ssl.protocol`.
 
 Type:list
-Default:TLSv1.2,TLSv1.3
+Default:TLSv1.2
 Valid Values:
 Importance:medium
 
@@ -364,7 +364,7 @@
 The SSL protocol used to generate the SSLContext. The default is 'TLSv1.3' 
when running with Java 11 or newer, 'TLSv1.2' otherwise. This value should be 
fine for most use cases. Allowed values in recent JVMs are 'TLSv1.2' and 
'TLSv1.3'. 'TLS', 'TLSv1.1', 'SSL', 'SSLv2' and 'SSLv3' may be supported in 
older JVMs, but their usage is discouraged due to known security 
vulnerabilities. With the default value for this config and 
'ssl.enabled.protocols', clients will downgrade to 'TLSv1.2' i [...]
 
 Type:string
-Default:TLSv1.3
+Default:TLSv1.2
 Valid Values:
 Importance:medium
 
diff --git a/33/generated/connect_metrics.html b/33/generated/connect_metrics.html
index 8bbd957e..8c872239 100644
--- a/33/generated/connect_metrics.html
+++ b/33/generated/connect_metrics.html
@@ -1,5 +1,5 @@
-[2022-09-28 16:37:02,145] INFO Metrics scheduler closed (org.apache.kafka.common.metrics.Metrics:693)
-[2022-09-28 16:37:02,148] INFO Metrics reporters closed (org.apache.kafka.common.metrics.Metrics:703)
+[2022-09-29 12:06:55,585] INFO Metrics scheduler closed (org.apache.kafka.common.metrics.Metrics:693)
+[2022-09-29 12:06:55,587] INFO Metrics reporters closed (org.apache.kafka.common.metrics.Metrics:703)
 
 
 kafka.connect:type=connect-worker-metrics
diff --git a/33/generated/connect_rest.yaml b/33/generated/connect_rest.yaml
index 834edc63..d82982de 100644
--- a/33/generated/connect_rest.yaml
+++ b/33/generated/connect_rest.yaml
@@ -8,7 +8,7 @@ info:
 name: Apache 2.0
 url: https://www.apache.org/licenses/LICENSE-2.0.html
   title: Kafka Connect REST API
-  version: 3.3.0-SNAPSHOT
+  version: 3.3.1
 paths:
   /:
 get:
diff --git a/33/generated/consumer_config.html b/33/generated/consumer_config.html
index c29b73dd..2970db51 100644
--- a/33/generated/consumer_config.html
+++ b/33/generated/consumer_config.html
@@ -434,7 +434,7 @@
 The list of protocols enabled for SSL connections. The default is 
'TLSv1.2,TLSv1.3' when running with Java 11 or newer, 'TLSv1.2' otherwise. With 
the default value for Java 11, clients and servers will prefer 

[kafka] annotated tag 3.3.1-rc0 updated (e23c59d00e6 -> de86e74210d)

2022-09-29 Thread jsancio
This is an automated email from the ASF dual-hosted git repository.

jsancio pushed a change to annotated tag 3.3.1-rc0
in repository https://gitbox.apache.org/repos/asf/kafka.git


*** WARNING: tag 3.3.1-rc0 was modified! ***

from e23c59d00e6 (commit)
  to de86e74210d (tag)
 tagging e23c59d00e687ff555d30bb4dc6c0cdec2c818ae (commit)
 replaces 3.3.0
  by José Armando García Sancio
  on Thu Sep 29 12:03:49 2022 -0700

- Log -
3.3.1-rc0
---


No new revisions were added by this update.

Summary of changes:



[kafka] branch trunk updated: KAFKA-4852: Fix ByteBufferSerializer#serialize(String, ByteBuffer) not compatible with offsets (#12683)

2022-09-29 Thread guozhang
This is an automated email from the ASF dual-hosted git repository.

guozhang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 51dbd175b08 KAFKA-4852: Fix ByteBufferSerializer#serialize(String, ByteBuffer) not compatible with offsets (#12683)
51dbd175b08 is described below

commit 51dbd175b08e78aeca03d6752847aa5f23c98659
Author: LinShunKang 
AuthorDate: Fri Sep 30 01:59:47 2022 +0800

KAFKA-4852: Fix ByteBufferSerializer#serialize(String, ByteBuffer) not compatible with offsets (#12683)

Reviewers: Guozhang Wang 
---
 .../common/serialization/ByteBufferSerializer.java | 31 --
 .../common/serialization/SerializationTest.java| 19 +
 2 files changed, 42 insertions(+), 8 deletions(-)

diff --git a/clients/src/main/java/org/apache/kafka/common/serialization/ByteBufferSerializer.java b/clients/src/main/java/org/apache/kafka/common/serialization/ByteBufferSerializer.java
index 9fb12544e0f..5987688759e 100644
--- a/clients/src/main/java/org/apache/kafka/common/serialization/ByteBufferSerializer.java
+++ b/clients/src/main/java/org/apache/kafka/common/serialization/ByteBufferSerializer.java
@@ -16,25 +16,40 @@
  */
 package org.apache.kafka.common.serialization;
 
+import org.apache.kafka.common.utils.Utils;
+
 import java.nio.ByteBuffer;
 
+/**
+ * ByteBufferSerializer will not change the ByteBuffer's mark, position, or limit,
+ * and the buffer does not need to be flipped before calling serialize(String, ByteBuffer).
+ * For example:
+ *
+ * 
+ * 
+ * ByteBufferSerializer serializer = ...; // Create Serializer
+ * ByteBuffer buffer = ...;   // Allocate ByteBuffer
+ * buffer.put(data);  // Put data into buffer; no need to flip
+ * serializer.serialize(topic, buffer);   // Serialize buffer
+ * 
+ * 
+ */
 public class ByteBufferSerializer implements Serializer<ByteBuffer> {
+
+    @Override
     public byte[] serialize(String topic, ByteBuffer data) {
-        if (data == null)
+        if (data == null) {
             return null;
-
-        data.rewind();
+        }
 
         if (data.hasArray()) {
-            byte[] arr = data.array();
+            final byte[] arr = data.array();
             if (data.arrayOffset() == 0 && arr.length == data.remaining()) {
                 return arr;
             }
         }
 
-        byte[] ret = new byte[data.remaining()];
-        data.get(ret, 0, ret.length);
-        data.rewind();
-        return ret;
+        final ByteBuffer copyData = data.asReadOnlyBuffer();
+        copyData.flip();
+        return Utils.toArray(copyData);
     }
 }
diff --git a/clients/src/test/java/org/apache/kafka/common/serialization/SerializationTest.java b/clients/src/test/java/org/apache/kafka/common/serialization/SerializationTest.java
index 85c09dd17ae..eb1fee3943f 100644
--- a/clients/src/test/java/org/apache/kafka/common/serialization/SerializationTest.java
+++ b/clients/src/test/java/org/apache/kafka/common/serialization/SerializationTest.java
@@ -31,6 +31,8 @@ import java.util.ArrayList;
 import java.util.LinkedList;
 import java.util.Stack;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNull;
@@ -368,4 +370,21 @@ public class SerializationTest {
 
 return Serdes.serdeFrom(serializer, deserializer);
 }
+
+    @Test
+    public void testByteBufferSerializer() {
+        final byte[] bytes = "Hello".getBytes(UTF_8);
+        final ByteBuffer heapBuffer0 = ByteBuffer.allocate(bytes.length + 1).put(bytes);
+        final ByteBuffer heapBuffer1 = ByteBuffer.allocate(bytes.length).put(bytes);
+        final ByteBuffer heapBuffer2 = ByteBuffer.wrap(bytes);
+        final ByteBuffer directBuffer0 = ByteBuffer.allocateDirect(bytes.length + 1).put(bytes);
+        final ByteBuffer directBuffer1 = ByteBuffer.allocateDirect(bytes.length).put(bytes);
+        try (final ByteBufferSerializer serializer = new ByteBufferSerializer()) {
+            assertArrayEquals(bytes, serializer.serialize(topic, heapBuffer0));
+            assertArrayEquals(bytes, serializer.serialize(topic, heapBuffer1));
+            assertArrayEquals(bytes, serializer.serialize(topic, heapBuffer2));
+            assertArrayEquals(bytes, serializer.serialize(topic, directBuffer0));
+            assertArrayEquals(bytes, serializer.serialize(topic, directBuffer1));
+        }
+    }
 }
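
Echoing the new test above, a minimal standalone usage sketch (assuming a clients build that includes this fix; "demo-topic" is a placeholder): the caller writes into a buffer and serializes it without flipping first.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.serialization.ByteBufferSerializer;

public final class ByteBufferSerializerDemo {
    public static void main(String[] args) {
        byte[] payload = "Hello".getBytes(StandardCharsets.UTF_8);

        // Write into a larger-than-needed buffer and do NOT flip it; the fixed
        // serializer copies [0, position) instead of rewinding past position.
        ByteBuffer unflipped = ByteBuffer.allocate(payload.length + 8).put(payload);

        try (ByteBufferSerializer serializer = new ByteBufferSerializer()) {
            byte[] out = serializer.serialize("demo-topic", unflipped);
            System.out.println(out.length == payload.length); // expected: true
        }
    }
}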



[kafka] branch 3.3 updated: KAFKA-14265: Prefix ACLs may shadow other prefix ACLs

2022-09-29 Thread cmccabe
This is an automated email from the ASF dual-hosted git repository.

cmccabe pushed a commit to branch 3.3
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/3.3 by this push:
 new 1780f2660e4 KAFKA-14265: Prefix ACLs may shadow other prefix ACLs
1780f2660e4 is described below

commit 1780f2660e4b45ca8895f1614a27613f922aad22
Author: Colin P. McCabe 
AuthorDate: Wed Sep 28 17:02:04 2022 -0700

KAFKA-14265: Prefix ACLs may shadow other prefix ACLs
---
 .../kafka/api/AuthorizerIntegrationTest.scala  | 14 +-
 .../authorizer/StandardAuthorizerData.java | 55 +-
 .../authorizer/StandardAuthorizerTest.java | 27 +++
 3 files changed, 83 insertions(+), 13 deletions(-)

diff --git a/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala
index a109ae8ce4c..ff1b2f5934d 100644
--- a/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala
+++ b/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala
@@ -25,7 +25,7 @@ import kafka.security.authorizer.AclEntry.WildcardHost
 import kafka.server.{BaseRequestTest, KafkaConfig}
 import kafka.utils.{TestInfoUtils, TestUtils}
 import kafka.utils.TestUtils.waitUntilTrue
-import org.apache.kafka.clients.admin.{Admin, AlterConfigOp}
+import org.apache.kafka.clients.admin.{Admin, AlterConfigOp, NewTopic}
 import org.apache.kafka.clients.consumer._
 import org.apache.kafka.clients.consumer.internals.NoOpConsumerRebalanceListener
 import org.apache.kafka.clients.producer._
@@ -2619,4 +2619,16 @@ class AuthorizerIntegrationTest extends BaseRequestTest {
 )
   }
 
+  @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumName)
+  @ValueSource(strings = Array("zk", "kraft"))
+  def testPrefixAcls(quorum: String): Unit = {
+    addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WildcardHost, CREATE, ALLOW)),
+      new ResourcePattern(TOPIC, "f", PREFIXED))
+    addAndVerifyAcls(Set(new AccessControlEntry("User:otherPrincipal", WildcardHost, CREATE, DENY)),
+      new ResourcePattern(TOPIC, "fooa", PREFIXED))
+    addAndVerifyAcls(Set(new AccessControlEntry("User:otherPrincipal", WildcardHost, CREATE, ALLOW)),
+      new ResourcePattern(TOPIC, "foob", PREFIXED))
+    createAdminClient().createTopics(Collections.
+      singletonList(new NewTopic("foobar", 1, 1.toShort))).all().get()
+  }
 }
diff --git a/metadata/src/main/java/org/apache/kafka/metadata/authorizer/StandardAuthorizerData.java b/metadata/src/main/java/org/apache/kafka/metadata/authorizer/StandardAuthorizerData.java
index c6e3b74a2ab..6e9efc3cd5d 100644
--- a/metadata/src/main/java/org/apache/kafka/metadata/authorizer/StandardAuthorizerData.java
+++ b/metadata/src/main/java/org/apache/kafka/metadata/authorizer/StandardAuthorizerData.java
@@ -352,8 +352,10 @@ public class StandardAuthorizerData {
         // This code relies on the ordering of StandardAcl within the NavigableMap.
         // Entries are sorted by resource type first, then REVERSE resource name.
         // Therefore, we can find all the applicable ACLs by starting at
-        // (resource_type, resource_name) and stepping forwards until we reach an ACL with
-        // a resource name which is not a prefix of the current one.
+        // (resource_type, resource_name) and stepping forwards until we reach
+        // an ACL with a resource name which is not a prefix of the current one.
+        // At that point, we need to check whether there are any more ACLs at
+        // the first divergence point.
         //
         // For example, when trying to authorize a TOPIC resource named foobar, we would
         // start at element 2, and continue on to 3 and 4 in the following map:
@@ -362,9 +364,12 @@ public class StandardAuthorizerData {
         // 2. rs=TOPIC rn=foobar pt=PREFIX
         // 3. rs=TOPIC rn=foob pt=LITERAL
         // 4. rs=TOPIC rn=foo pt=PREFIX
-        // 5. rs=TOPIC rn= pt=LITERAL
+        // 5. rs=TOPIC rn=fb pt=PREFIX
+        // 6. rs=TOPIC rn=fa pt=PREFIX
+        // 7. rs=TOPIC rn=f  pt=PREFIX
+        // 8. rs=TOPIC rn= pt=LITERAL
         //
-        // Once we reached element 5, we would stop scanning.
+        // Once we reached element 5, we would jump to element 7.
         MatchingAclBuilder matchingAclBuilder = new MatchingAclBuilder();
         StandardAcl exemplar = new StandardAcl(
             action.resourcePattern().resourceType(),
@@ -394,6 +399,20 @@ public class StandardAuthorizerData {
 return matchingAclBuilder.build();
 }
 
+    static int matchesUpTo(
+        String resource,
+        String pattern
+    ) {
+        int i = 0;
+        while (true) {
+            if (resource.length() == i) break;
+            if (pattern.length() == i) break;
+            if (resource.charAt(i) != pattern.charAt(i)) 

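The rewritten comment above is the heart of the fix: scanning in reverse-name order can no longer stop at the first non-matching entry, because further matches (elements 7 and 8 in the example) may exist past the divergence point. A simplified, self-contained sketch of that skip-ahead scan over plain strings (ignoring resource types and the LITERAL/PREFIXED distinction, which the real StandardAuthorizerData does handle):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.NavigableSet;
import java.util.TreeSet;

public final class PrefixAclScan {
    // Shared-prefix length of resource and pattern, as in matchesUpTo above.
    static int matchesUpTo(String resource, String pattern) {
        int i = 0;
        while (i < resource.length() && i < pattern.length()
                && resource.charAt(i) == pattern.charAt(i)) {
            i++;
        }
        return i;
    }

    // Walks patterns in REVERSE lexicographic order starting at `resource`;
    // on a non-prefix entry it re-seeks at the divergence point instead of stopping.
    static List<String> matchingPrefixes(NavigableSet<String> patterns, String resource) {
        List<String> matches = new ArrayList<>();
        Iterator<String> it = patterns.tailSet(resource, true).iterator();
        while (it.hasNext()) {
            String p = it.next();
            if (resource.startsWith(p)) {
                matches.add(p); // p is a (possibly empty) prefix of resource
            } else {
                // Nothing between p and the shared prefix can match; jump there.
                it = patterns.tailSet(resource.substring(0, matchesUpTo(resource, p)), true).iterator();
            }
        }
        return matches;
    }

    public static void main(String[] args) {
        NavigableSet<String> patterns = new TreeSet<>(Comparator.reverseOrder());
        patterns.addAll(Arrays.asList("foobar", "foob", "foo", "fb", "fa", "f", ""));
        // Skips "fa" entirely yet still finds "f" and "": this mirrors the
        // jump from element 5 to element 7 in the comment's example.
        System.out.println(matchingPrefixes(patterns, "foobar"));
    }
}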
[kafka] branch trunk updated (d62a42df2e2 -> b9da249bdfa)

2022-09-29 Thread cmccabe
This is an automated email from the ASF dual-hosted git repository.

cmccabe pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


from d62a42df2e2 KAFKA-10199: Integrate Topology Pause/Resume with StateUpdater (#12659)
 add ba89eaf0d7f KAFKA-14265: Prefix ACLs may shadow other prefix ACLs
 add fc786c335c6 add unit and integration tests
 new b9da249bdfa fix test

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../kafka/api/AuthorizerIntegrationTest.scala  | 14 +-
 .../authorizer/StandardAuthorizerData.java | 55 +-
 .../authorizer/StandardAuthorizerTest.java | 27 +++
 3 files changed, 83 insertions(+), 13 deletions(-)



[kafka] 01/01: fix test

2022-09-29 Thread cmccabe
This is an automated email from the ASF dual-hosted git repository.

cmccabe pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git

commit b9da249bdfa9df7a511284d815af8200ab0dfbd9
Author: Colin P. McCabe 
AuthorDate: Thu Sep 29 09:17:03 2022 -0700

fix test
---
 .../test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala
index d6f736055b2..ff1b2f5934d 100644
--- a/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala
+++ b/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala
@@ -2628,8 +2628,7 @@ class AuthorizerIntegrationTest extends BaseRequestTest {
       new ResourcePattern(TOPIC, "fooa", PREFIXED))
     addAndVerifyAcls(Set(new AccessControlEntry("User:otherPrincipal", WildcardHost, CREATE, ALLOW)),
       new ResourcePattern(TOPIC, "foob", PREFIXED))
-    val future = createAdminClient().createTopics(Collections.
-      singletonList(new NewTopic("foobar", 1, 1.toShort))).all()
-    JTestUtils.assertFutureThrows(future, classOf[TopicAuthorizationException])
+    createAdminClient().createTopics(Collections.
+      singletonList(new NewTopic("foobar", 1, 1.toShort))).all().get()
   }
 }



[kafka] 01/01: add unit and integration tests

2022-09-29 Thread cmccabe
This is an automated email from the ASF dual-hosted git repository.

cmccabe pushed a commit to branch KAFKA-14265
in repository https://gitbox.apache.org/repos/asf/kafka.git

commit fc786c335c6f51a9022b0190b15f57f42e4ea9ab
Author: Colin P. McCabe 
AuthorDate: Wed Sep 28 22:58:39 2022 -0700

add unit and integration tests
---
 .../kafka/api/AuthorizerIntegrationTest.scala  | 15 +++-
 .../authorizer/StandardAuthorizerData.java |  2 +-
 .../authorizer/StandardAuthorizerTest.java | 27 ++
 3 files changed, 42 insertions(+), 2 deletions(-)

diff --git a/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala
index a109ae8ce4c..d6f736055b2 100644
--- a/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala
+++ b/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala
@@ -25,7 +25,7 @@ import kafka.security.authorizer.AclEntry.WildcardHost
 import kafka.server.{BaseRequestTest, KafkaConfig}
 import kafka.utils.{TestInfoUtils, TestUtils}
 import kafka.utils.TestUtils.waitUntilTrue
-import org.apache.kafka.clients.admin.{Admin, AlterConfigOp}
+import org.apache.kafka.clients.admin.{Admin, AlterConfigOp, NewTopic}
 import org.apache.kafka.clients.consumer._
 import org.apache.kafka.clients.consumer.internals.NoOpConsumerRebalanceListener
 import org.apache.kafka.clients.producer._
@@ -2619,4 +2619,17 @@ class AuthorizerIntegrationTest extends BaseRequestTest {
 )
   }
 
+  @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumName)
+  @ValueSource(strings = Array("zk", "kraft"))
+  def testPrefixAcls(quorum: String): Unit = {
+    addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WildcardHost, CREATE, ALLOW)),
+      new ResourcePattern(TOPIC, "f", PREFIXED))
+    addAndVerifyAcls(Set(new AccessControlEntry("User:otherPrincipal", WildcardHost, CREATE, DENY)),
+      new ResourcePattern(TOPIC, "fooa", PREFIXED))
+    addAndVerifyAcls(Set(new AccessControlEntry("User:otherPrincipal", WildcardHost, CREATE, ALLOW)),
+      new ResourcePattern(TOPIC, "foob", PREFIXED))
+    val future = createAdminClient().createTopics(Collections.
+      singletonList(new NewTopic("foobar", 1, 1.toShort))).all()
+    JTestUtils.assertFutureThrows(future, classOf[TopicAuthorizationException])
+  }
 }
diff --git a/metadata/src/main/java/org/apache/kafka/metadata/authorizer/StandardAuthorizerData.java b/metadata/src/main/java/org/apache/kafka/metadata/authorizer/StandardAuthorizerData.java
index b52a02562fe..6e9efc3cd5d 100644
--- a/metadata/src/main/java/org/apache/kafka/metadata/authorizer/StandardAuthorizerData.java
+++ b/metadata/src/main/java/org/apache/kafka/metadata/authorizer/StandardAuthorizerData.java
@@ -656,4 +656,4 @@ public class StandardAuthorizerData {
     HashMap<Uuid, StandardAcl> getAclsById() {
 return aclsById;
 }
-}
+}
\ No newline at end of file
diff --git a/metadata/src/test/java/org/apache/kafka/metadata/authorizer/StandardAuthorizerTest.java b/metadata/src/test/java/org/apache/kafka/metadata/authorizer/StandardAuthorizerTest.java
index 987c00155c4..a26eb3d50b5 100644
--- a/metadata/src/test/java/org/apache/kafka/metadata/authorizer/StandardAuthorizerTest.java
+++ b/metadata/src/test/java/org/apache/kafka/metadata/authorizer/StandardAuthorizerTest.java
@@ -640,4 +640,31 @@ public class StandardAuthorizerTest {
         assertTrue(futures.get(CONTROLLER).toCompletableFuture().isDone());
         assertFalse(futures.get(CONTROLLER).toCompletableFuture().isCompletedExceptionally());
 }
+
+    @Test
+    public void testPrefixAcls() throws Exception {
+        StandardAuthorizer authorizer = createAndInitializeStandardAuthorizer();
+        List<StandardAcl> acls = Arrays.asList(
+            new StandardAcl(TOPIC, "fooa", PREFIXED, "User:alice", "*", ALL, ALLOW),
+            new StandardAcl(TOPIC, "foobar", LITERAL, "User:bob", "*", ALL, ALLOW),
+            new StandardAcl(TOPIC, "f", PREFIXED, "User:bob", "*", ALL, ALLOW)
+        );
+        acls.forEach(acl -> {
+            StandardAclWithId aclWithId = withId(acl);
+            authorizer.addAcl(aclWithId.id(), aclWithId.acl());
+        });
+        assertEquals(Arrays.asList(ALLOWED, DENIED, ALLOWED), authorizer.authorize(
+            newRequestContext("bob"),
+            Arrays.asList(
+                newAction(WRITE, TOPIC, "foobarr"),
+                newAction(READ, TOPIC, "goobar"),
+                newAction(READ, TOPIC, "fooa"))));
+
+        assertEquals(Arrays.asList(ALLOWED, DENIED, DENIED), authorizer.authorize(
+            newRequestContext("alice"),
+            Arrays.asList(
+                newAction(DESCRIBE, TOPIC, "fooa"),
+                newAction(WRITE, TOPIC, "bar"),
+                newAction(READ, TOPIC, "baz"))));
+    }
 }



[kafka] branch KAFKA-14265 created (now fc786c335c6)

2022-09-29 Thread cmccabe
This is an automated email from the ASF dual-hosted git repository.

cmccabe pushed a change to branch KAFKA-14265
in repository https://gitbox.apache.org/repos/asf/kafka.git


  at fc786c335c6 add unit and integration tests

This branch includes the following new commits:

 new fc786c335c6 add unit and integration tests

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.