showuon commented on a change in pull request #11564:
URL: https://github.com/apache/kafka/pull/11564#discussion_r761686373



##########
File path: core/src/main/scala/kafka/tools/ConsoleProducer.scala
##########
@@ -146,62 +146,71 @@ object ConsoleProducer {
       .describedAs("size")
       .ofType(classOf[java.lang.Integer])
       .defaultsTo(200)
-    val messageSendMaxRetriesOpt = parser.accepts("message-send-max-retries", "Brokers can fail receiving the message for multiple reasons, and being unavailable transiently is just one of them. This property specifies the number of retries before the producer give up and drop this message.")
+    val messageSendMaxRetriesOpt = parser.accepts("message-send-max-retries", "Brokers can fail receiving the message for multiple reasons, " +
+      "and being unavailable transiently is just one of them. This property specifies the number of retries before the producer give up and drop this message. " +
+      "This is the option to control the `retries` in producer configs.")
       .withRequiredArg
       .ofType(classOf[java.lang.Integer])
-      .defaultsTo(3)
-    val retryBackoffMsOpt = parser.accepts("retry-backoff-ms", "Before each retry, the producer refreshes the metadata of relevant topics. Since leader election takes a bit of time, this property specifies the amount of time that the producer waits before refreshing the metadata.")
+      .defaultsTo(Integer.MAX_VALUE)
+    val retryBackoffMsOpt = parser.accepts("retry-backoff-ms", "Before each retry, the producer refreshes the metadata of relevant topics. " +
+      "Since leader election takes a bit of time, this property specifies the amount of time that the producer waits before refreshing the metadata. " +
+      "This is the option to control the `retry.backoff.ms` in producer configs.")
       .withRequiredArg
-      .ofType(classOf[java.lang.Integer])
+      .ofType(classOf[java.lang.Long])
       .defaultsTo(100)
     val sendTimeoutOpt = parser.accepts("timeout", "If set and the producer is running in asynchronous mode, this gives the maximum amount of time" +
-      " a message will queue awaiting sufficient batch size. The value is given in ms.")
+      " a message will queue awaiting sufficient batch size. The value is given in ms. " +
+      "This is the option to control the `linger.ms` in producer configs.")
       .withRequiredArg
       .describedAs("timeout_ms")
-      .ofType(classOf[java.lang.Integer])
+      .ofType(classOf[java.lang.Long])
       .defaultsTo(1000)
-    val requestRequiredAcksOpt = parser.accepts("request-required-acks", "The required acks of the producer requests")
+    val requestRequiredAcksOpt = parser.accepts("request-required-acks", "The required `acks` of the producer requests")
       .withRequiredArg
       .describedAs("request required acks")
       .ofType(classOf[java.lang.String])
       .defaultsTo("1")
-    val requestTimeoutMsOpt = parser.accepts("request-timeout-ms", "The ack timeout of the producer requests. Value must be non-negative and non-zero")
+    val requestTimeoutMsOpt = parser.accepts("request-timeout-ms", "The ack timeout of the producer requests. Value must be non-negative and non-zero.")
       .withRequiredArg
       .describedAs("request timeout ms")
       .ofType(classOf[java.lang.Integer])
       .defaultsTo(1500)
     val metadataExpiryMsOpt = parser.accepts("metadata-expiry-ms",
-      "The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any leadership changes.")
+      "The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any leadership changes. " +
+        "This is the option to control `metadata.max.age.ms` in producer configs.")
       .withRequiredArg
       .describedAs("metadata expiration interval")
       .ofType(classOf[java.lang.Long])
       .defaultsTo(5*60*1000L)
     val maxBlockMsOpt = parser.accepts("max-block-ms",
-      "The max time that the producer will block for during a send request")
+      "The max time that the producer will block for during a send request.")
       .withRequiredArg
       .describedAs("max block on send")
       .ofType(classOf[java.lang.Long])
       .defaultsTo(60*1000L)
     val maxMemoryBytesOpt = parser.accepts("max-memory-bytes",
-      "The total memory used by the producer to buffer records waiting to be sent to the server.")
+      "The total memory used by the producer to buffer records waiting to be sent to the server. " +
+        "This is the option to control `buffer.memory` in producer configs.")
       .withRequiredArg
       .describedAs("total memory in bytes")
       .ofType(classOf[java.lang.Long])
       .defaultsTo(32 * 1024 * 1024L)
     val maxPartitionMemoryBytesOpt = parser.accepts("max-partition-memory-bytes",
       "The buffer size allocated for a partition. When records are received which are smaller than this size the producer " +
-        "will attempt to optimistically group them together until this size is reached.")
+        "will attempt to optimistically group them together until this size is reached. " +
+        "This is the option to control `batch.size` in producer configs.")
       .withRequiredArg
       .describedAs("memory in bytes per partition")
-      .ofType(classOf[java.lang.Long])
-      .defaultsTo(16 * 1024L)
+      .ofType(classOf[java.lang.Integer])
+      .defaultsTo(16 * 1024)

Review comment:
       Wrong expected type. The `batch.size` config expects an Integer type. Same as above.



