315157973 commented on a change in pull request #8125:
URL: https://github.com/apache/pulsar/pull/8125#discussion_r667590395



##########
File path: 
pulsar-broker/src/main/java/org/apache/pulsar/broker/PulsarService.java
##########
@@ -251,6 +253,9 @@
     private volatile CompletableFuture<Void> closeFuture;
     // key is listener name , value is pulsar address and pulsar ssl address
     private Map<String, AdvertisedListener> advertisedListeners;
+    // For REST api, keep track of topics served by this pulsar.
+    @Getter
+    private ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<Integer>> 
owningTopics = new ConcurrentOpenHashMap<>();

Review comment:
       Why do we need to cache all the topics? We only need to determine whether the
topic is owned by the current broker; otherwise the request will be redirected.
   The broker already has a topic cache.
   We can also determine whether it belongs to the current cluster by checking the
namespace.

##########
File path: 
pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/TopicsBase.java
##########
@@ -0,0 +1,715 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.broker.admin.impl;
+
+import io.netty.buffer.ByteBuf;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.ByteBuffer;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.text.DateFormat;
+import java.time.Instant;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Base64;
+import java.util.Collections;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.CompletableFuture;
+import java.util.stream.Collectors;
+import javax.ws.rs.container.AsyncResponse;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.Status;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.bookkeeper.mledger.ManagedLedgerException;
+import org.apache.bookkeeper.mledger.impl.PositionImpl;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.pulsar.broker.lookup.LookupResult;
+import org.apache.pulsar.broker.namespace.LookupOptions;
+import org.apache.pulsar.broker.rest.RestMessagePublishContext;
+import org.apache.pulsar.broker.service.BrokerServiceException;
+import org.apache.pulsar.broker.service.schema.SchemaRegistry;
+import org.apache.pulsar.broker.service.schema.exceptions.SchemaException;
+import org.apache.pulsar.broker.web.RestException;
+import org.apache.pulsar.client.api.CompressionType;
+import org.apache.pulsar.client.api.Message;
+import org.apache.pulsar.client.api.MessageId;
+import org.apache.pulsar.client.api.PulsarClientException;
+import org.apache.pulsar.client.api.Schema;
+import org.apache.pulsar.client.impl.MessageIdImpl;
+import org.apache.pulsar.client.impl.MessageImpl;
+import org.apache.pulsar.client.impl.schema.AutoConsumeSchema;
+import org.apache.pulsar.client.impl.schema.KeyValueSchema;
+import org.apache.pulsar.client.impl.schema.KeyValueSchemaInfo;
+import org.apache.pulsar.client.impl.schema.StringSchema;
+import org.apache.pulsar.client.impl.schema.generic.GenericJsonRecord;
+import org.apache.pulsar.common.api.proto.MessageMetadata;
+import org.apache.pulsar.common.compression.CompressionCodecProvider;
+import org.apache.pulsar.common.naming.TopicName;
+import org.apache.pulsar.common.partition.PartitionedTopicMetadata;
+import org.apache.pulsar.common.protocol.Commands;
+import org.apache.pulsar.common.protocol.schema.SchemaData;
+import org.apache.pulsar.common.protocol.schema.SchemaVersion;
+import org.apache.pulsar.common.schema.KeyValueEncodingType;
+import org.apache.pulsar.common.schema.LongSchemaVersion;
+import org.apache.pulsar.common.schema.SchemaInfo;
+import org.apache.pulsar.common.schema.SchemaType;
+import org.apache.pulsar.common.util.FutureUtil;
+import org.apache.pulsar.common.util.ObjectMapperFactory;
+import org.apache.pulsar.common.util.collections.ConcurrentOpenHashSet;
+import org.apache.pulsar.websocket.data.ProducerAck;
+import org.apache.pulsar.websocket.data.ProducerAcks;
+import org.apache.pulsar.websocket.data.ProducerMessage;
+import org.apache.pulsar.websocket.data.ProducerMessages;
+
+/**
+ * Contains methods used by the REST API to produce/consume/read messages
+ * to/from pulsar topics.
+ */
+@Slf4j
+public class TopicsBase extends PersistentTopicsBase {
+
+    private static String defaultProducerName = "RestProducer";
+
+    // Publish message to a topic, can be partitioned or non-partitioned
+    protected void publishMessages(AsyncResponse asyncResponse, 
ProducerMessages request,
+                                           boolean authoritative) {
+        String topic = topicName.getPartitionedTopicName();
+        if (pulsar().getOwningTopics().containsKey(topic) || 
!findOwnerBrokerForTopic(authoritative, asyncResponse)) {
+            // If we've done look up or or after look up this broker owns some 
of the partitions
+            // then proceed to publish message else asyncResponse will be 
complete by look up.
+            addOrGetSchemaForTopic(getSchemaData(request.getKeySchema(), 
request.getValueSchema()),
+                    request.getSchemaVersion() == -1 ? null : new 
LongSchemaVersion(request.getSchemaVersion()))
+            .thenAccept(schemaMeta -> {
+                // Both schema version and schema data are necessary.
+                if (schemaMeta.getLeft() != null && schemaMeta.getRight() != 
null) {
+                    internalPublishMessages(topicName, request, 
pulsar().getOwningTopics().get(topic).values(),
+                            asyncResponse, 
AutoConsumeSchema.getSchema(schemaMeta.getLeft().toSchemaInfo()),
+                            schemaMeta.getRight());
+                } else {
+                    asyncResponse.resume(new 
RestException(Status.INTERNAL_SERVER_ERROR,
+                            "Fail to add or retrieve schema."));
+                }
+            }).exceptionally(e -> {
+                if (log.isDebugEnabled()) {
+                    log.debug("Fail to publish message: " + e.getMessage());
+                }
+                asyncResponse.resume(new 
RestException(Status.INTERNAL_SERVER_ERROR, "Fail to publish message:"
+                        + e.getMessage()));
+                return null;
+            });
+        }
+    }
+
+    // Publish message to single partition of a partitioned topic.
+    protected void publishMessagesToPartition(AsyncResponse asyncResponse, 
ProducerMessages request,
+                                                     boolean authoritative, 
int partition) {
+        if (topicName.isPartitioned()) {
+            asyncResponse.resume(new RestException(Status.BAD_REQUEST, "Topic 
name can't contain "
+                    + "'-partition-' suffix."));
+        }
+        String topic = topicName.getPartitionedTopicName();
+        // If broker owns the partition then proceed to publish message, else 
do look up.
+        if ((pulsar().getOwningTopics().containsKey(topic) && 
pulsar().getOwningTopics().get(topic).contains(partition))
+                || !findOwnerBrokerForTopic(authoritative, asyncResponse)) {
+            addOrGetSchemaForTopic(getSchemaData(request.getKeySchema(), 
request.getValueSchema()),
+                    request.getSchemaVersion() == -1 ? null : new 
LongSchemaVersion(request.getSchemaVersion()))
+            .thenAccept(schemaMeta -> {
+                // Both schema version and schema data are necessary.
+                if (schemaMeta.getLeft() != null && schemaMeta.getRight() != 
null) {
+                    internalPublishMessagesToPartition(topicName, request, 
partition, asyncResponse,
+                            
AutoConsumeSchema.getSchema(schemaMeta.getLeft().toSchemaInfo()), 
schemaMeta.getRight());
+                } else {
+                    asyncResponse.resume(new 
RestException(Status.INTERNAL_SERVER_ERROR,
+                            "Fail to add or retrieve schema."));
+                }
+            }).exceptionally(e -> {
+                if (log.isDebugEnabled()) {
+                    log.debug("Fail to publish message to single partition: " 
+ e.getLocalizedMessage());
+                }
+                asyncResponse.resume(new 
RestException(Status.INTERNAL_SERVER_ERROR, "Fail to publish message"
+                        + "to single partition: "
+                        + e.getMessage()));
+                return null;
+            });
+        }
+    }
+
+    private void internalPublishMessagesToPartition(TopicName topicName, 
ProducerMessages request,
+                                                  int partition, AsyncResponse 
asyncResponse,
+                                                  Schema schema, SchemaVersion 
schemaVersion) {
+        try {
+            String producerName = (null == request.getProducerName() || 
request.getProducerName().isEmpty())
+                    ? defaultProducerName : request.getProducerName();
+            List<Message> messages = buildMessage(request, schema, 
producerName);
+            List<CompletableFuture<PositionImpl>> publishResults = new 
ArrayList<>();
+            List<ProducerAck> produceMessageResults = new ArrayList<>();
+            for (int index = 0; index < messages.size(); index++) {
+                ProducerAck produceMessageResult = new ProducerAck();
+                produceMessageResult.setMessageId(partition + "");
+                produceMessageResults.add(produceMessageResult);
+                
publishResults.add(publishSingleMessageToPartition(topicName.getPartition(partition).toString(),
+                        messages.get(index)));
+            }
+            FutureUtil.waitForAll(publishResults).thenRun(() -> {
+                processPublishMessageResults(produceMessageResults, 
publishResults);
+                asyncResponse.resume(Response.ok().entity(new 
ProducerAcks(produceMessageResults,
+                        ((LongSchemaVersion) 
schemaVersion).getVersion())).build());
+            }).exceptionally(e -> {
+                // Some message may published successfully, so till return ok 
with result for each individual message.
+                processPublishMessageResults(produceMessageResults, 
publishResults);
+                asyncResponse.resume(Response.ok().entity(new 
ProducerAcks(produceMessageResults,
+                        ((LongSchemaVersion) 
schemaVersion).getVersion())).build());
+                return null;
+            });
+        } catch (Exception e) {
+            if (log.isDebugEnabled()) {
+                log.debug("Fail publish messages to single partition with rest 
produce message "
+                                + "request for topic  {}: {} ", topicName, 
e.getCause());
+            }
+            asyncResponse.resume(new 
RestException(Status.INTERNAL_SERVER_ERROR, e.getMessage()));
+        }
+    }
+
+    private void internalPublishMessages(TopicName topicName, ProducerMessages 
request,
+                                                     List<Integer> 
partitionIndexes,
+                                                     AsyncResponse 
asyncResponse, Schema schema,
+                                                     SchemaVersion 
schemaVersion) {
+        if (partitionIndexes.size() < 1) {
+            asyncResponse.resume(new 
RestException(Status.INTERNAL_SERVER_ERROR,
+                    new BrokerServiceException.TopicNotFoundException("Topic 
not owned by current broker.")));
+        }
+        try {
+            String producerName = (null == request.getProducerName() || 
request.getProducerName().isEmpty())
+                    ? defaultProducerName : request.getProducerName();
+            List<Message> messages = buildMessage(request, schema, 
producerName);
+            List<CompletableFuture<PositionImpl>> publishResults = new 
ArrayList<>();
+            List<ProducerAck> produceMessageResults = new ArrayList<>();
+            // Try to publish messages to all partitions this broker owns in 
round robin mode.
+            for (int index = 0; index < messages.size(); index++) {
+                ProducerAck produceMessageResult = new ProducerAck();
+                produceMessageResult.setMessageId(partitionIndexes.get(index % 
(int) partitionIndexes.size()) + "");
+                produceMessageResults.add(produceMessageResult);
+                
publishResults.add(publishSingleMessageToPartition(topicName.getPartition(partitionIndexes
+                                .get(index % (int) 
partitionIndexes.size())).toString(),
+                        messages.get(index)));
+            }
+            FutureUtil.waitForAll(publishResults).thenRun(() -> {
+                processPublishMessageResults(produceMessageResults, 
publishResults);
+                asyncResponse.resume(Response.ok().entity(new 
ProducerAcks(produceMessageResults,
+                        ((LongSchemaVersion) 
schemaVersion).getVersion())).build());
+            }).exceptionally(e -> {
+                // Some message may published successfully, so till return ok 
with result for each individual message.
+                processPublishMessageResults(produceMessageResults, 
publishResults);
+                asyncResponse.resume(Response.ok().entity(new 
ProducerAcks(produceMessageResults,
+                        ((LongSchemaVersion) 
schemaVersion).getVersion())).build());
+                return null;
+            });
+        } catch (Exception e) {
+            if (log.isDebugEnabled()) {
+                log.debug("Fail to publish messages with rest produce message 
request for topic  {}: {} ",
+                        topicName, e.getCause());
+            }
+            asyncResponse.resume(new 
RestException(Status.INTERNAL_SERVER_ERROR, e.getMessage()));
+        }
+    }
+
+    private CompletableFuture<PositionImpl> 
publishSingleMessageToPartition(String topic, Message message) {
+        CompletableFuture<PositionImpl> publishResult = new 
CompletableFuture<>();
+        pulsar().getBrokerService().getTopic(topic, false)
+        .thenAccept(t -> {
+            // TODO: Check message backlog and fail if backlog too large.
+            if (!t.isPresent()) {
+                // Topic not found, and remove from owning partition list.
+                publishResult.completeExceptionally(new 
BrokerServiceException.TopicNotFoundException("Topic not "
+                        + "owned by current broker."));
+                TopicName topicName = TopicName.get(topic);
+                
pulsar().getOwningTopics().get(topicName.getPartitionedTopicName())
+                        .remove(topicName.getPartitionIndex());
+            } else {
+                try {
+                    t.get().publishMessage(messageToByteBuf(message),
+                            RestMessagePublishContext.get(publishResult, 
t.get(), System.nanoTime()));
+                } catch (Exception e) {
+                    if (log.isDebugEnabled()) {
+                        log.debug("Fail to publish single messages to topic  
{}: {} ",
+                                topicName, e.getCause());
+                    }
+                    publishResult.completeExceptionally(e);
+                }
+            }
+        });
+
+        return publishResult;
+    }
+
+    // Process results for all message publishing attempts
+    private void processPublishMessageResults(List<ProducerAck> 
produceMessageResults,
+                                              
List<CompletableFuture<PositionImpl>> publishResults) {
+        // process publish message result
+        for (int index = 0; index < publishResults.size(); index++) {
+            try {
+                PositionImpl position = publishResults.get(index).get();
+                MessageId messageId = new 
MessageIdImpl(position.getLedgerId(), position.getEntryId(),
+                        
Integer.parseInt(produceMessageResults.get(index).getMessageId()));
+                
produceMessageResults.get(index).setMessageId(messageId.toString());
+            } catch (Exception e) {
+                if (log.isDebugEnabled()) {
+                    log.debug("Fail publish [{}] message with rest produce 
message request for topic  {}: {} ",
+                            index, topicName);
+                }
+                if (e instanceof 
BrokerServiceException.TopicNotFoundException) {
+                    // Topic ownership might changed, force to look up again.
+                    
pulsar().getOwningTopics().remove(topicName.getPartitionedTopicName());
+                }
+                extractException(e, produceMessageResults.get(index));
+            }
+        }
+    }
+
+    // Return error code depends on exception we got indicating if client 
should retry with same broker.
+    private void extractException(Exception e, ProducerAck 
produceMessageResult) {
+        if (!(e instanceof BrokerServiceException.TopicFencedException && e 
instanceof ManagedLedgerException)) {
+            produceMessageResult.setErrorCode(2);
+        } else {
+            produceMessageResult.setErrorCode(1);
+        }
+        produceMessageResult.setErrorMsg(e.getMessage());
+    }
+
+    // Look up topic owner for given topic. Return if asyncResponse has been 
completed
+    // which indicating redirect or exception.
+    private boolean findOwnerBrokerForTopic(boolean authoritative, 
AsyncResponse asyncResponse) {

Review comment:
       Why not use TopicLookupBase directly




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to