Github user paul-rogers commented on a diff in the pull request:

    https://github.com/apache/drill/pull/1027#discussion_r151572304
  
    --- Diff: contrib/storage-kafka/src/main/java/org/apache/drill/exec/store/kafka/KafkaGroupScan.java ---
    @@ -0,0 +1,320 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + *
    + * http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package org.apache.drill.exec.store.kafka;
    +
    +import java.util.Arrays;
    +import java.util.Collection;
    +import java.util.List;
    +import java.util.Map;
    +import java.util.Set;
    +
    +import org.apache.commons.lang3.StringUtils;
    +import org.apache.drill.common.exceptions.ExecutionSetupException;
    +import org.apache.drill.common.exceptions.UserException;
    +import org.apache.drill.common.expression.SchemaPath;
    +import org.apache.drill.exec.physical.EndpointAffinity;
    +import org.apache.drill.exec.physical.base.AbstractGroupScan;
    +import org.apache.drill.exec.physical.base.GroupScan;
    +import org.apache.drill.exec.physical.base.PhysicalOperator;
    +import org.apache.drill.exec.physical.base.ScanStats;
    +import org.apache.drill.exec.physical.base.ScanStats.GroupScanProperty;
    +import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;
    +import org.apache.drill.exec.store.StoragePluginRegistry;
    +import org.apache.drill.exec.store.kafka.KafkaSubScan.KafkaSubScanSpec;
    +import org.apache.drill.exec.store.schedule.AffinityCreator;
    +import org.apache.drill.exec.store.schedule.AssignmentCreator;
    +import org.apache.drill.exec.store.schedule.CompleteWork;
    +import org.apache.drill.exec.store.schedule.EndpointByteMap;
    +import org.apache.drill.exec.store.schedule.EndpointByteMapImpl;
    +import org.apache.kafka.clients.consumer.KafkaConsumer;
    +import org.apache.kafka.common.Node;
    +import org.apache.kafka.common.PartitionInfo;
    +import org.apache.kafka.common.TopicPartition;
    +import org.apache.kafka.common.serialization.ByteArrayDeserializer;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;
    +
    +import com.fasterxml.jackson.annotation.JacksonInject;
    +import com.fasterxml.jackson.annotation.JsonCreator;
    +import com.fasterxml.jackson.annotation.JsonIgnore;
    +import com.fasterxml.jackson.annotation.JsonProperty;
    +import com.fasterxml.jackson.annotation.JsonTypeName;
    +import com.google.common.base.Preconditions;
    +import com.google.common.collect.ListMultimap;
    +import com.google.common.collect.Lists;
    +import com.google.common.collect.Maps;
    +
    +@JsonTypeName("kafka-scan")
    +public class KafkaGroupScan extends AbstractGroupScan {
    +
    +  private static final Logger logger = LoggerFactory.getLogger(KafkaGroupScan.class);
    +
    +  // Assuming a default average topic message size of 1KB, which will be used to
    +  // compute the stats and work assignments
    +  private static final long MSG_SIZE = 1024;
    +
    +  private final KafkaStoragePlugin kafkaStoragePlugin;
    +  private final KafkaStoragePluginConfig kafkaStoragePluginConfig;
    +  private List<SchemaPath> columns;
    +  private final KafkaScanSpec kafkaScanSpec;
    +
    +  private List<PartitionScanWork> partitionWorkList;
    +  private ListMultimap<Integer, PartitionScanWork> assignments;
    +  private List<EndpointAffinity> affinities;
    +
    +  @JsonCreator
    +  public KafkaGroupScan(@JsonProperty("userName") String userName,
    +      @JsonProperty("kafkaStoragePluginConfig") KafkaStoragePluginConfig 
kafkaStoragePluginConfig,
    +      @JsonProperty("columns") List<SchemaPath> columns, 
@JsonProperty("scanSpec") KafkaScanSpec scanSpec,
    +      @JacksonInject StoragePluginRegistry pluginRegistry) {
    +    this(userName, kafkaStoragePluginConfig, columns, scanSpec, 
(KafkaStoragePlugin) pluginRegistry);
    +  }
    +
    +  public KafkaGroupScan(KafkaStoragePlugin kafkaStoragePlugin, KafkaScanSpec kafkaScanSpec, List<SchemaPath> columns) {
    +    super(StringUtils.EMPTY);
    +    this.kafkaStoragePlugin = kafkaStoragePlugin;
    +    this.kafkaStoragePluginConfig = (KafkaStoragePluginConfig) kafkaStoragePlugin.getConfig();
    +    this.columns = columns;
    +    this.kafkaScanSpec = kafkaScanSpec;
    +    init();
    +  }
    +
    +  public KafkaGroupScan(String userName, KafkaStoragePluginConfig kafkaStoragePluginConfig, List<SchemaPath> columns,
    +      KafkaScanSpec kafkaScanSpec, KafkaStoragePlugin kafkaStoragePlugin) {
    +    super(userName);
    +    this.kafkaStoragePluginConfig = kafkaStoragePluginConfig;
    +    this.columns = columns;
    +    this.kafkaScanSpec = kafkaScanSpec;
    +    this.kafkaStoragePlugin = kafkaStoragePlugin;
    +    init();
    +  }
    +
    +  public KafkaGroupScan(KafkaGroupScan that) {
    +    super(that);
    +    this.kafkaStoragePluginConfig = that.kafkaStoragePluginConfig;
    +    this.columns = that.columns;
    +    this.kafkaScanSpec = that.kafkaScanSpec;
    +    this.kafkaStoragePlugin = that.kafkaStoragePlugin;
    +    this.partitionWorkList = that.partitionWorkList;
    +    this.assignments = that.assignments;
    +  }
    +
    +  private static class PartitionScanWork implements CompleteWork {
    +
    +    private final EndpointByteMapImpl byteMap = new EndpointByteMapImpl();
    +
    +    private final TopicPartition topicPartition;
    +    private final long beginOffset;
    +    private final long latestOffset;
    +
    +    public PartitionScanWork(TopicPartition topicPartition, long beginOffset, long latestOffset) {
    +      this.topicPartition = topicPartition;
    +      this.beginOffset = beginOffset;
    +      this.latestOffset = latestOffset;
    +    }
    +
    +    public TopicPartition getTopicPartition() {
    +      return topicPartition;
    +    }
    +
    +    public long getBeginOffset() {
    +      return beginOffset;
    +    }
    +
    +    public long getLatestOffset() {
    +      return latestOffset;
    +    }
    +
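    +    // Orders work units by their estimated total bytes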
    +    @Override
    +    public int compareTo(CompleteWork o) {
    +      return Long.compare(getTotalBytes(), o.getTotalBytes());
    +    }
    +
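    +    // Estimated size of this work unit: number of messages in the offset range
    +    // times the assumed average message size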
    +    @Override
    +    public long getTotalBytes() {
    +      return (latestOffset - beginOffset) * MSG_SIZE;
    +    }
    +
    +    @Override
    +    public EndpointByteMap getByteMap() {
    +      return byteMap;
    +    }
    +
    +  }
    +
    +  /**
    +   * Computes work per topic partition, based on the start and end offsets of
    +   * each corresponding topicPartition
    +   */
    +  private void init() {
    +    partitionWorkList = Lists.newArrayList();
    +    Collection<DrillbitEndpoint> endpoints = 
kafkaStoragePlugin.getContext().getBits();
    +    Map<String, DrillbitEndpoint> endpointMap = Maps.newHashMap();
    +    for (DrillbitEndpoint endpoint : endpoints) {
    +      endpointMap.put(endpoint.getAddress(), endpoint);
    +    }
    +
    +    Map<TopicPartition, Long> startOffsetsMap = Maps.newHashMap();
    +    Map<TopicPartition, Long> endOffsetsMap = Maps.newHashMap();
    +    List<PartitionInfo> topicPartitions = null;
    +    String topicName = kafkaScanSpec.getTopicName();
    +
    +    try (KafkaConsumer<?, ?> kafkaConsumer = new KafkaConsumer<>(kafkaStoragePlugin.getConfig().getKafkaConsumerProps(),
    +        new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
    +      if (!kafkaConsumer.listTopics().containsKey(topicName)) {
    +        throw UserException.permissionError()
    +            .message("Table '%s' does not exist", topicName)
    +            .build(logger);
    +      }
    +
    +      kafkaConsumer.subscribe(Arrays.asList(topicName));
    +      // Based on the KafkaConsumer JavaDoc, the seekToBeginning/seekToEnd functions
    +      // evaluate lazily, seeking to the first/last offset in all partitions only
    +      // when poll(long) or position(TopicPartition) is called
    +      kafkaConsumer.poll(0);
    +      Set<TopicPartition> assignments = kafkaConsumer.assignment();
    +      topicPartitions = kafkaConsumer.partitionsFor(topicName);
    +
    +      // fetch start offsets for each topicPartition
    +      kafkaConsumer.seekToBeginning(assignments);
    +      for (TopicPartition topicPartition : assignments) {
    +        startOffsetsMap.put(topicPartition, kafkaConsumer.position(topicPartition));
    +      }
    +
    +      // fetch end offsets for each topicPartition
    +      kafkaConsumer.seekToEnd(assignments);
    +      for (TopicPartition topicPartition : assignments) {
    +        endOffsetsMap.put(topicPartition, kafkaConsumer.position(topicPartition));
    +      }
    +    } catch (Exception e) {
    +      logger.error(e.getMessage(), e);
    --- End diff --
    
    Turns out that the `UserException` will take care of logging in the `build()` call.
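    
    For reference, a minimal sketch of what the catch block could look like once the redundant `logger.error()` call is dropped; the `dataReadError` wrapper and the message text here are illustrative, not a prescription:
    
    ```java
    } catch (Exception e) {
      // UserException.build(logger) logs the error itself,
      // so no separate logger.error() call is needed
      throw UserException.dataReadError(e)
          .message("Failed to fetch offsets for topic %s", topicName)
          .build(logger);
    }
    ```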

