[ https://issues.apache.org/jira/browse/DRILL-5337?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16235911#comment-16235911 ]

ASF GitHub Bot commented on DRILL-5337:
---------------------------------------

Github user Vlad-Storona commented on a diff in the pull request:

    https://github.com/apache/drill/pull/774#discussion_r148567693
  
    --- Diff: contrib/storage-opentsdb/src/main/java/org/apache/drill/exec/store/openTSDB/OpenTSDBGroupScan.java ---
    @@ -0,0 +1,220 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + *
    + * http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package org.apache.drill.exec.store.openTSDB;
    +
    +import com.fasterxml.jackson.annotation.JacksonInject;
    +import com.fasterxml.jackson.annotation.JsonCreator;
    +import com.fasterxml.jackson.annotation.JsonIgnore;
    +import com.fasterxml.jackson.annotation.JsonProperty;
    +import com.fasterxml.jackson.annotation.JsonTypeName;
    +import com.google.common.base.Preconditions;
    +import com.google.common.collect.ListMultimap;
    +import com.google.common.collect.Lists;
    +import com.google.common.collect.Maps;
    +import org.apache.drill.common.exceptions.ExecutionSetupException;
    +import org.apache.drill.common.expression.SchemaPath;
    +import org.apache.drill.exec.physical.EndpointAffinity;
    +import org.apache.drill.exec.physical.base.AbstractGroupScan;
    +import org.apache.drill.exec.physical.base.GroupScan;
    +import org.apache.drill.exec.physical.base.PhysicalOperator;
    +import org.apache.drill.exec.physical.base.ScanStats;
    +import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;
    +import org.apache.drill.exec.store.StoragePluginRegistry;
    +import org.apache.drill.exec.store.openTSDB.OpenTSDBSubScan.OpenTSDBSubScanSpec;
    +import org.apache.drill.exec.store.schedule.AffinityCreator;
    +import org.apache.drill.exec.store.schedule.AssignmentCreator;
    +import org.apache.drill.exec.store.schedule.CompleteWork;
    +import org.apache.drill.exec.store.schedule.EndpointByteMap;
    +import org.apache.drill.exec.store.schedule.EndpointByteMapImpl;
    +
    +import java.io.IOException;
    +import java.util.Collection;
    +import java.util.List;
    +import java.util.Map;
    +import java.util.Objects;
    +
    +@JsonTypeName("openTSDB-scan")
    +public class OpenTSDBGroupScan extends AbstractGroupScan {
    +
    +  private static final long DEFAULT_TABLET_SIZE = 1000;
    +
    +  private OpenTSDBStoragePluginConfig storagePluginConfig;
    +  private OpenTSDBScanSpec openTSDBScanSpec;
    +  private OpenTSDBStoragePlugin storagePlugin;
    +
    +  private ListMultimap<Integer, OpenTSDBWork> assignments;
    +  private List<SchemaPath> columns;
    +  private List<OpenTSDBWork> openTSDBWorkList = Lists.newArrayList();
    +  private List<EndpointAffinity> affinities;
    +
    +  private boolean filterPushedDown = false;
    +
    +  @JsonCreator
    +  public OpenTSDBGroupScan(@JsonProperty("openTSDBScanSpec") OpenTSDBScanSpec openTSDBScanSpec,
    +                           @JsonProperty("storage") OpenTSDBStoragePluginConfig openTSDBStoragePluginConfig,
    +                           @JsonProperty("columns") List<SchemaPath> columns,
    +                           @JacksonInject StoragePluginRegistry pluginRegistry) throws IOException, ExecutionSetupException {
    +    this((OpenTSDBStoragePlugin) pluginRegistry.getPlugin(openTSDBStoragePluginConfig), openTSDBScanSpec, columns);
    +  }
    +
    +  public OpenTSDBGroupScan(OpenTSDBStoragePlugin storagePlugin,
    +                           OpenTSDBScanSpec scanSpec, List<SchemaPath> columns) {
    +    super((String) null);
    +    this.storagePlugin = storagePlugin;
    +    this.storagePluginConfig = storagePlugin.getConfig();
    +    this.openTSDBScanSpec = scanSpec;
    +    this.columns = columns == null || columns.size() == 0 ? ALL_COLUMNS : columns;
    +    init();
    +  }
    +
    +  /**
    +   * Private constructor, used for cloning.
    +   *
    +   * @param that The OpenTSDBGroupScan to clone
    +   */
    +  private OpenTSDBGroupScan(OpenTSDBGroupScan that) {
    +    super((String) null);
    +    this.columns = that.columns;
    +    this.openTSDBScanSpec = that.openTSDBScanSpec;
    +    this.storagePlugin = that.storagePlugin;
    +    this.storagePluginConfig = that.storagePluginConfig;
    +    this.filterPushedDown = that.filterPushedDown;
    +    this.openTSDBWorkList = that.openTSDBWorkList;
    +    this.assignments = that.assignments;
    +    init();
    +  }
    +
    +  private void init() {
    +    Collection<DrillbitEndpoint> endpoints = storagePlugin.getContext().getBits();
    +    Map<String, DrillbitEndpoint> endpointMap = Maps.newHashMap();
    +
    +    for (DrillbitEndpoint endpoint : endpoints) {
    +      endpointMap.put(endpoint.getAddress(), endpoint);
    +    }
    +  }
    +
    +  @Override
    +  public List<EndpointAffinity> getOperatorAffinity() {
    +    if (affinities == null) {
    +      affinities = AffinityCreator.getAffinityMap(openTSDBWorkList);
    +    }
    +    return affinities;
    +  }
    +
    +  @Override
    +  public int getMaxParallelizationWidth() {
    +    return openTSDBWorkList.size();
    +  }
    +
    +  @Override
    +  public void applyAssignments(List<DrillbitEndpoint> incomingEndpoints) {
    +    assignments = AssignmentCreator.getMappings(incomingEndpoints, openTSDBWorkList);
    +  }
    +
    +  @Override
    +  public OpenTSDBSubScan getSpecificScan(int minorFragmentId) {
    +    List<OpenTSDBSubScanSpec> scanSpecList = Lists.newArrayList();
    +    scanSpecList.add(new OpenTSDBSubScanSpec(getTableName()));
    +    return new OpenTSDBSubScan(storagePlugin, storagePluginConfig, scanSpecList, this.columns);
    +  }
    +
    +  @Override
    +  public ScanStats getScanStats() {
    +    //magic number ?
    +    long recordCount = 100000L * openTSDBWorkList.size();
    --- End diff --
    
    It is a weird calculation; I will rewrite it.
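
    One possible direction (just a sketch, and the exact ScanStats arguments are an assumption rather than the final change): scale the estimate by the number of work units and mark it as inexact so the planner does not treat it as an exact row count:

        @Override
        public ScanStats getScanStats() {
          // Rough, explicitly approximate estimate: DEFAULT_TABLET_SIZE rows per
          // work unit, flagged as NO_EXACT_ROW_COUNT instead of a hard-coded 100000.
          long estimatedRowCount = DEFAULT_TABLET_SIZE * Math.max(openTSDBWorkList.size(), 1);
          return new ScanStats(ScanStats.GroupScanProperty.NO_EXACT_ROW_COUNT,
              estimatedRowCount, 1, estimatedRowCount);
        }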


> OpenTSDB storage plugin
> -----------------------
>
>                 Key: DRILL-5337
>                 URL: https://issues.apache.org/jira/browse/DRILL-5337
>             Project: Apache Drill
>          Issue Type: New Feature
>          Components: Storage - Other
>            Reporter: Dmitriy Gavrilovych
>            Assignee: Dmitriy Gavrilovych
>            Priority: Major
>              Labels: features
>             Fix For: 1.12.0
>
>
> Storage plugin for OpenTSDB
> The plugin uses the REST API to work with TSDB.
> Expected queries are listed below:
> SELECT * FROM openTSDB.`warp.speed.test`;
> Returns all elements from the warp.speed.test table with the default aggregator SUM.
> SELECT * FROM openTSDB.`(metric=warp.speed.test)`;
> Returns all elements from the warp.speed.test table, as in the previous query, but with the alternative FROM syntax.
> SELECT * FROM openTSDB.`(metric=warp.speed.test, aggregator=avg)`;
> Returns all elements from the warp.speed.test table, but with a custom aggregator.
> SELECT `timestamp`, sum(`aggregated value`) FROM openTSDB.`(metric=warp.speed.test, aggregator=avg)` GROUP BY `timestamp`;
> Returns values aggregated and grouped by standard Drill functions from the warp.speed.test table, using the custom aggregator.
> SELECT * FROM openTSDB.`(metric=warp.speed.test, downsample=5m-avg)`
> Returns data limited by the given downsample specification.
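
For reference, a query with a custom aggregator and downsample presumably maps onto the OpenTSDB 2.x HTTP query endpoint roughly as in the sketch below (host, port, and time range are placeholder assumptions, not taken from the plugin sources):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class OpenTSDBQuerySketch {
      public static void main(String[] args) throws Exception {
        // OpenTSDB 2.x "/api/query" endpoint; the "m" parameter is
        // <aggregator>:[<downsample>:]<metric>, e.g. avg:5m-avg:warp.speed.test.
        URL url = new URL("http://localhost:4242/api/query?start=1h-ago&m=avg:5m-avg:warp.speed.test");
        HttpURLConnection connection = (HttpURLConnection) url.openConnection();
        connection.setRequestMethod("GET");
        try (BufferedReader in = new BufferedReader(new InputStreamReader(connection.getInputStream()))) {
          String line;
          while ((line = in.readLine()) != null) {
            System.out.println(line); // JSON array of data points per metric/tag combination
          }
        }
      }
    }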



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)
