chia7712 commented on a change in pull request #10275: URL: https://github.com/apache/kafka/pull/10275#discussion_r598444486
########## File path: clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminApiHandler.java ########## @@ -0,0 +1,131 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.admin.internals; + +import org.apache.kafka.common.requests.AbstractRequest; +import org.apache.kafka.common.requests.AbstractResponse; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static java.util.Objects.requireNonNull; + +public interface AdminApiHandler<K, V> { + + /** + * Get a user-friendly name for the API this handler is implementing. + */ + String apiName(); + + /** + * Initialize the set of keys required to handle this API and how the driver + * should map them to the broker that will handle the request for these keys. + * + * Two mapping types are supported: + * + * - Static mapping: when the brokerId is known ahead of time + * - Dynamic mapping: when the brokerId must be discovered dynamically + * + * @return the key mappings + */ + Keys<K> initializeKeys(); + + /** + * Build the request. 
The set of keys are derived by {@link AdminApiDriver} + * during the lookup stage as the set of keys which all map to the same + * destination broker. + * + * @param brokerId the target brokerId for the request + * @param keys the set of keys that should be handled by this request + * + * @return a builder for the request containing the given keys + */ + AbstractRequest.Builder<?> buildRequest(Integer brokerId, Set<K> keys); + + /** + * Callback that is invoked when a request returns successfully. + * The handler should parse the response, check for errors, and return a + * result which indicates which keys (if any) have either been completed or + * failed with an unrecoverable error. + * + * It is also possible that the response indicates an incorrect target brokerId + * (e.g. in the case of a NotLeader error when the request is bound for a partition + * leader). In this case the key will be "unmapped" from the target brokerId + * and lookup will be retried. + * + * Note that keys which received a retriable error should be left out of the + * result. They will be retried automatically. + * + * @param brokerId the brokerId that the associated request was sent to + * @param keys the set of keys from the associated request + * @param response the response received from the broker + * + * @return result indicating key completion, failure, and unmapping + */ + ApiResult<K, V> handleResponse(Integer brokerId, Set<K> keys, AbstractResponse response); + + class Keys<K> { + public final Map<K, Integer> staticKeys; + public final Set<K> dynamicKeys; + public final AdminApiLookupStrategy<K> lookupStrategy; + + public Keys( Review comment: Is it legal that key exists in both `staticKeys` and `dynamicKeys`? If not, could you add a check for them? 
########## File path: clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminApiDriver.java ########## @@ -0,0 +1,461 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.admin.internals; + +import org.apache.kafka.clients.admin.internals.AdminApiHandler.Keys; +import org.apache.kafka.common.errors.DisconnectException; +import org.apache.kafka.common.internals.KafkaFutureImpl; +import org.apache.kafka.common.requests.AbstractRequest; +import org.apache.kafka.common.requests.AbstractResponse; +import org.apache.kafka.common.utils.LogContext; +import org.slf4j.Logger; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.OptionalInt; +import java.util.Set; +import java.util.function.BiFunction; + +/** + * The `KafkaAdminClient`'s internal `Call` primitive is not a good fit for multi-stage + * request workflows such as we see with the group coordinator APIs or any request which + * needs to be sent to a partition leader. Typically these APIs have two concrete stages: + * + * 1. 
Lookup: Find the broker that can fulfill the request (e.g. partition leader or group + * coordinator) + * 2. Fulfillment: Send the request to the broker found in the first step + * + * This is complicated by the fact that `Admin` APIs are typically batched, which + * means the Lookup stage may result in a set of brokers. For example, take a `ListOffsets` + * request for a set of topic partitions. In the Lookup stage, we will find the partition + * leaders for this set of partitions; in the Fulfillment stage, we will group together + * partitions according to the IDs of the discovered leaders. + * + * Additionally, the flow between these two stages is bi-directional. We may find after + * sending a `ListOffsets` request to an expected leader that there was a leader change. + * This would result in a topic partition being sent back to the Lookup stage. + * + * Managing this complexity by chaining together `Call` implementations is challenging + * and messy, so instead we use this class to do the bookkeeping. It handles both the + * batching aspect as well as the transitions between the Lookup and Fulfillment stages. + * + * Note that the interpretation of the `retries` configuration becomes ambiguous + * for this kind of pipeline. We could treat it as an overall limit on the number + * of requests that can be sent, but that is not very useful because each pipeline + * has a minimum number of requests that need to be sent in order to satisfy the request. + * Instead, we treat this number of retries independently at each stage so that each + * stage has at least one opportunity to complete. So if a user sets `retries=1`, then + * the full pipeline can still complete as long as there are no request failures. + * + * @param <K> The key type, which is also the granularity of the request routing (e.g. 
+ * this could be `TopicPartition` in the case of requests intended for a partition + * leader or the `GroupId` in the case of consumer group requests intended for + * the group coordinator) + * @param <V> The fulfillment type for each key (e.g. this could be consumer group state + * when the key type is a consumer `GroupId`) + */ +public class AdminApiDriver<K, V> { + private final Logger log; + private final long retryBackoffMs; + private final long deadlineMs; + private final AdminApiHandler<K, V> handler; + private final Keys<K> keys; + private final Map<K, KafkaFutureImpl<V>> futures; + + private final BiMultimap<ApiRequestScope, K> lookupMap = new BiMultimap<>(); + private final BiMultimap<FulfillmentScope, K> fulfillmentMap = new BiMultimap<>(); + private final Map<ApiRequestScope, RequestState> requestStates = new HashMap<>(); + + public AdminApiDriver( + AdminApiHandler<K, V> handler, + long deadlineMs, + long retryBackoffMs, + LogContext logContext + ) { + this.handler = handler; + this.deadlineMs = deadlineMs; + this.retryBackoffMs = retryBackoffMs; + this.log = logContext.logger(AdminApiDriver.class); + this.futures = new HashMap<>(); + this.keys = initializeKeys(handler); + } + + private Keys<K> initializeKeys(AdminApiHandler<K, V> handler) { + Keys<K> keys = handler.initializeKeys(); + + keys.staticKeys.forEach((key, brokerId) -> { + futures.put(key, new KafkaFutureImpl<>()); + map(key, brokerId); + }); + + keys.dynamicKeys.forEach(key -> { + futures.put(key, new KafkaFutureImpl<>()); + lookupMap.put(keys.lookupStrategy.lookupScope(key), key); + }); + + return keys; + } + + /** + * Associate a key with a brokerId. This is called after a response in the Lookup + * stage reveals the mapping (e.g. when the `FindCoordinator` tells us the + * group coordinator for a specific consumer group). 
+ */ + private void map(K key, Integer brokerId) { + lookupMap.remove(key); + fulfillmentMap.put(new FulfillmentScope(brokerId), key); + } + + /** + * Disassociate a key from the currently mapped brokerId. This will send the key + * back to the Lookup stage, which will allow us to attempt lookup again. + */ + private void unmap(K key) { + if (!keys.dynamicKeys.contains(key)) { + throw new IllegalStateException("Attempt to unmap key " + key + " which is not dynamically mapped"); + } + + fulfillmentMap.remove(key); + lookupMap.put(keys.lookupStrategy.lookupScope(key), key); + } + + private void clear(K key) { + lookupMap.remove(key); + fulfillmentMap.remove(key); + } + + OptionalInt keyToBrokerId(K key) { + Optional<FulfillmentScope> scope = fulfillmentMap.getKey(key); + if (scope.isPresent()) { + return OptionalInt.of(scope.get().destinationBrokerId); + } else { + return OptionalInt.empty(); + } + } + + /** + * Complete the future associated with the given key exceptionally. After this is called, + * the key will be taken out of both the Lookup and Fulfillment stages so that requests + * are not retried. + */ + private void completeExceptionally(K key, Throwable t) { + KafkaFutureImpl<V> future = futures.get(key); + if (future == null) { + log.warn("Attempt to complete future for {}, which was not requested", key); + } else { + clear(key); + future.completeExceptionally(t); + } + } + + /** + * Complete the future associated with the given key. After this is called, the key will + * be taken out of both the Lookup and Fulfillment stages so that requests are not retried. + */ + private void complete(K key, V value) { + KafkaFutureImpl<V> future = futures.get(key); + if (future == null) { + log.warn("Attempt to complete future for {}, which was not requested", key); + } else { + clear(key); + future.complete(value); + } + } + + /** + * Check whether any requests need to be sent. 
This should be called immediately + * after the driver is constructed and then again after each request returns + * (i.e. after {@link #onFailure(long, RequestSpec, Throwable)} or + * {@link #onResponse(long, RequestSpec, AbstractResponse)}). + * + * @return A list of requests that need to be sent + */ + public List<RequestSpec<K>> poll() { + List<RequestSpec<K>> requests = new ArrayList<>(); + collectLookupRequests(requests); + collectFulfillmentRequests(requests); + return requests; + } + + /** + * Get a map of the futures that are awaiting completion. + */ + public Map<K, KafkaFutureImpl<V>> futures() { + return futures; + } + + /** + * Callback that is invoked when a `Call` returns a response successfully. + */ + public void onResponse( + long currentTimeMs, + RequestSpec<K> spec, + AbstractResponse response + ) { + clearInflightRequest(currentTimeMs, spec); + + if (spec.scope instanceof FulfillmentScope) { + int brokerId = ((FulfillmentScope) spec.scope).destinationBrokerId; + AdminApiHandler.ApiResult<K, V> result = handler.handleResponse( + brokerId, + spec.keys, + response + ); + result.completedKeys.forEach(this::complete); + result.failedKeys.forEach(this::completeExceptionally); + result.unmappedKeys.forEach(this::unmap); + } else { + AdminApiLookupStrategy.LookupResult<K> result = keys.lookupStrategy.handleResponse( + spec.keys, + response + ); + result.failedKeys.forEach(this::completeExceptionally); + result.mappedKeys.forEach(this::map); + } + } + + /** + * Callback that is invoked when a `Call` is failed. + */ + public void onFailure( + long currentTimeMs, + RequestSpec<K> spec, + Throwable t + ) { + clearInflightRequest(currentTimeMs, spec); + if (t instanceof DisconnectException) { + log.debug("Node disconnected before response could be received for request {}. " + + "Will attempt retry", spec.request); + + // After a disconnect, we want the driver to attempt to lookup the key + // again (if the key is dynamically mapped). 
This gives us a chance to + // find a new coordinator or partition leader for example. + spec.keys.stream() + .filter(keys.dynamicKeys::contains) + .forEach(this::unmap); + } else { + spec.keys.forEach(key -> completeExceptionally(key, t)); + } + } + + private void clearInflightRequest(long currentTimeMs, RequestSpec<K> spec) { + RequestState requestState = requestStates.get(spec.scope); + if (requestState != null) { + requestState.clearInflight(currentTimeMs); + } + } + + private <T extends ApiRequestScope> void collectRequests( + List<RequestSpec<K>> requests, + BiMultimap<T, K> multimap, + BiFunction<Set<K>, T, AbstractRequest.Builder<?>> buildRequest + ) { + for (Map.Entry<T, Set<K>> entry : multimap.entrySet()) { + T scope = entry.getKey(); + + Set<K> keys = entry.getValue(); + if (keys.isEmpty()) { + continue; + } + + RequestState requestState = requestStates.computeIfAbsent(scope, c -> new RequestState()); + if (requestState.hasInflight()) { + continue; + } + + AbstractRequest.Builder<?> request = buildRequest.apply(keys, scope); + RequestSpec<K> spec = new RequestSpec<>( + handler.apiName() + "(api=" + request.apiKey() + ")", + scope, + new HashSet<>(keys), // copy to avoid exposing mutable state Review comment: line#285 also exposes `keys`. How about building an immutable copy and then pass it to both `RequestSpec` and `buildRequest`? ########## File path: clients/src/main/java/org/apache/kafka/common/utils/Utils.java ########## @@ -1343,4 +1343,11 @@ public static long getDateTime(String timestamp) throws ParseException, IllegalA public static boolean isBlank(String str) { return str == null || str.trim().isEmpty(); } + + public static <K, V> Map<K, V> initializeMap(Collection<K> keys, Supplier<V> valueSupplier) { + Map<K, V> res = new HashMap<>(); Review comment: Could you set the initial capacity for the map? -- This is an automated message from the Apache Git Service. 
To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org