hachikuji commented on a change in pull request #10814:
URL: https://github.com/apache/kafka/pull/10814#discussion_r652193627



##########
File path: tools/src/main/java/org/apache/kafka/tools/TransactionsCommand.java
##########
@@ -0,0 +1,622 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.tools;
+
+import net.sourceforge.argparse4j.ArgumentParsers;
+import net.sourceforge.argparse4j.inf.ArgumentGroup;
+import net.sourceforge.argparse4j.inf.ArgumentParser;
+import net.sourceforge.argparse4j.inf.ArgumentParserException;
+import net.sourceforge.argparse4j.inf.Namespace;
+import net.sourceforge.argparse4j.inf.Subparser;
+import net.sourceforge.argparse4j.inf.Subparsers;
+import org.apache.kafka.clients.admin.AbortTransactionSpec;
+import org.apache.kafka.clients.admin.Admin;
+import org.apache.kafka.clients.admin.AdminClientConfig;
+import org.apache.kafka.clients.admin.DescribeProducersOptions;
+import org.apache.kafka.clients.admin.DescribeProducersResult;
+import org.apache.kafka.clients.admin.ProducerState;
+import org.apache.kafka.clients.admin.TransactionListing;
+import org.apache.kafka.clients.admin.TransactionDescription;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.utils.Exit;
+import org.apache.kafka.common.utils.Time;
+import org.apache.kafka.common.utils.Utils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.OptionalLong;
+import java.util.Properties;
+import java.util.concurrent.ExecutionException;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import static java.util.Collections.singleton;
+import static java.util.Collections.singletonList;
+import static net.sourceforge.argparse4j.impl.Arguments.store;
+
+public abstract class TransactionsCommand {
+    private static final Logger log = 
LoggerFactory.getLogger(TransactionsCommand.class);
+
+    protected final Time time;
+
+    protected TransactionsCommand(Time time) {
+        this.time = time;
+    }
+
+    /**
+     * Get the name of this command (e.g. `describe-producers`).
+     */
+    abstract String name();
+
+    /**
+     * Specify the arguments needed for this command.
+     */
+    abstract void addSubparser(Subparsers subparsers);
+
+    /**
+     * Execute the command logic.
+     */
+    abstract void execute(Admin admin, Namespace ns, PrintStream out) throws 
Exception;
+
+
+    static class AbortTransactionCommand extends TransactionsCommand {
+
        /**
         * Create the `abort` subcommand.
         *
         * @param time clock instance passed through to the base command
         */
        AbortTransactionCommand(Time time) {
            super(time);
        }
+
        @Override
        String name() {
            // Subcommand name as typed on the command line.
            return "abort";
        }
+
+        @Override
+        void addSubparser(Subparsers subparsers) {
+            Subparser subparser = subparsers.addParser(name())
+                .help("abort a hanging transaction (requires administrative 
privileges)");
+
+            subparser.addArgument("--topic")
+                .help("topic name")
+                .action(store())
+                .type(String.class)
+                .required(true);
+
+            subparser.addArgument("--partition")
+                .help("partition number")
+                .action(store())
+                .type(Integer.class)
+                .required(true);
+
+            ArgumentGroup newBrokerArgumentGroup = 
subparser.addArgumentGroup("new brokers")
+                .description("For newer brokers, you must provide the start 
offset of the transaction " +
+                    "to be aborted");
+
+            newBrokerArgumentGroup.addArgument("--start-offset")
+                .help("start offset of the transaction to abort")
+                .action(store())
+                .type(Long.class);
+
+            ArgumentGroup olderBrokerArgumentGroup = 
subparser.addArgumentGroup("older brokers")
+                .description("For older brokers, you must provide all of these 
arguments");
+
+            olderBrokerArgumentGroup.addArgument("--producer-id")
+                .help("producer id")
+                .action(store())
+                .type(Long.class);
+
+            olderBrokerArgumentGroup.addArgument("--producer-epoch")
+                .help("producer epoch")
+                .action(store())
+                .type(Short.class);
+
+            olderBrokerArgumentGroup.addArgument("--coordinator-epoch")
+                .help("coordinator epoch")
+                .action(store())
+                .type(Integer.class);
+        }
+
+        private AbortTransactionSpec buildAbortSpec(
+            Admin admin,
+            TopicPartition topicPartition,
+            long startOffset
+        ) throws Exception {
+            final DescribeProducersResult.PartitionProducerState result;
+            try {
+                result = admin.describeProducers(singleton(topicPartition))
+                    .partitionResult(topicPartition)
+                    .get();
+            } catch (ExecutionException e) {
+                printErrorAndExit("Failed to validate producer state for 
partition "
+                    + topicPartition, e.getCause());
+                return null;
+            }
+
+            Optional<ProducerState> foundProducerState = 
result.activeProducers().stream()
+                .filter(producerState -> {
+                    OptionalLong txnStartOffsetOpt = 
producerState.currentTransactionStartOffset();
+                    return txnStartOffsetOpt.isPresent() && 
txnStartOffsetOpt.getAsLong() == startOffset;
+                })
+                .findFirst();
+
+            if (!foundProducerState.isPresent()) {
+                printErrorAndExit("Could not find any open transactions 
starting at offset " +
+                    startOffset + " on partition " + topicPartition);
+                return null;
+            }
+
+            ProducerState producerState = foundProducerState.get();
+            return new AbortTransactionSpec(
+                topicPartition,
+                producerState.producerId(),
+                (short) producerState.producerEpoch(),
+                producerState.coordinatorEpoch().orElse(0)
+            );
+        }
+
+        private void abortTransaction(
+            Admin admin,
+            AbortTransactionSpec abortSpec
+        ) throws Exception {
+            try {
+                admin.abortTransaction(abortSpec).all().get();
+            } catch (ExecutionException e) {
+                TransactionsCommand.printErrorAndExit("Failed to abort 
transaction " + abortSpec, e.getCause());
+            }
+        }
+
+        @Override
+        void execute(Admin admin, Namespace ns, PrintStream out) throws 
Exception {
+            String topicName = ns.getString("topic");
+            Integer partitionId = ns.getInt("partition");
+            TopicPartition topicPartition = new TopicPartition(topicName, 
partitionId);
+
+            Long startOffset = ns.getLong("start_offset");
+            Long producerId = ns.getLong("producer_id");
+
+            if (startOffset == null && producerId == null) {
+                printErrorAndExit("The transaction to abort must be identified 
either with " +
+                    "--start-offset (for newer brokers) or with " +
+                    "--producer-id, --producer-epoch, and --coordinator-epoch 
(for older brokers)");
+                return;
+            }
+
+            final AbortTransactionSpec abortSpec;
+            if (startOffset == null) {
+                Short producerEpoch = ns.getShort("producer_epoch");
+                if (producerEpoch == null) {
+                    printErrorAndExit("Missing required argument 
--producer-epoch");
+                    return;
+                }
+
+                Integer coordinatorEpoch = ns.getInt("coordinator_epoch");
+                if (coordinatorEpoch == null) {
+                    printErrorAndExit("Missing required argument 
--coordinator-epoch");
+                    return;
+                }
+
+                // If a transaction was started by a new producerId and became 
hanging
+                // before the initial commit/abort, then the coordinator epoch 
will be -1
+                // as seen in the `DescribeProducers` output. In this case, we 
conservatively
+                // use a coordinator epoch of 0, which is less than or equal 
to any possible
+                // leader epoch.
+                if (coordinatorEpoch < 0) {

Review comment:
       What would be the advantage of doing so? One nice thing about this 
implementation is that it is compatible with older brokers, which require a 
non-negative coordinator epoch.




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to