Github user wzhfy commented on a diff in the pull request:

    https://github.com/apache/spark/pull/15544#discussion_r140131809
  
    --- Diff: sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/ApproxCountDistinctForIntervals.scala ---
    @@ -0,0 +1,235 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *    http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.spark.sql.catalyst.expressions.aggregate
    +
    +import java.util
    +
    +import org.apache.spark.sql.catalyst.InternalRow
    +import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
    +import org.apache.spark.sql.catalyst.analysis.TypeCheckResult.{TypeCheckFailure, TypeCheckSuccess}
    +import org.apache.spark.sql.catalyst.expressions.{AttributeReference, ExpectsInputTypes, Expression}
    +import org.apache.spark.sql.catalyst.util.{ArrayData, GenericArrayData, HyperLogLogPlusPlusHelper}
    +import org.apache.spark.sql.types._
    +
    +/**
    + * This function counts the approximate number of distinct values (ndv) in
    + * intervals constructed from the endpoints specified in `endpointsExpression`.
    + * The endpoints should be sorted in ascending order. E.g., given an array of
    + * endpoints (endpoint_1, endpoint_2, ... endpoint_N), this returns the
    + * approximate ndv's for the intervals
    + * [endpoint_1, endpoint_2], (endpoint_2, endpoint_3], ... (endpoint_N-1, endpoint_N].
    + * To count the ndv's in these intervals, the HyperLogLogPlusPlus algorithm is
    + * applied to each of them.
    + * @param child the expression to estimate the ndv's of.
    + * @param endpointsExpression the expression that constructs the intervals;
    + *                            its elements should be sorted in ascending order.
    + * @param relativeSD the maximum estimation error allowed in the
    + *                   HyperLogLogPlusPlus algorithm.
    + */
    +case class ApproxCountDistinctForIntervals(
    +    child: Expression,
    +    endpointsExpression: Expression,
    +    relativeSD: Double = 0.05,
    +    mutableAggBufferOffset: Int = 0,
    +    inputAggBufferOffset: Int = 0)
    +  extends ImperativeAggregate with ExpectsInputTypes {
    +
    +  def this(child: Expression, endpointsExpression: Expression) = {
    +    this(
    +      child = child,
    +      endpointsExpression = endpointsExpression,
    +      relativeSD = 0.05,
    +      mutableAggBufferOffset = 0,
    +      inputAggBufferOffset = 0)
    +  }
    +
    +  def this(child: Expression, endpointsExpression: Expression, relativeSD: Expression) = {
    +    this(
    +      child = child,
    +      endpointsExpression = endpointsExpression,
    +      relativeSD = HyperLogLogPlusPlus.validateDoubleLiteral(relativeSD),
    +      mutableAggBufferOffset = 0,
    +      inputAggBufferOffset = 0)
    +  }
    +
    +  override def inputTypes: Seq[AbstractDataType] = {
    +    Seq(TypeCollection(NumericType, TimestampType, DateType), ArrayType)
    +  }
    +
    +  // Mark as lazy so that endpointsExpression is not evaluated during tree transformation.
    +  lazy val endpoints: Array[Double] =
    +    (endpointsExpression.dataType, endpointsExpression.eval()) match {
    +      case (ArrayType(baseType: NumericType, _), arrayData: ArrayData) =>
    +        val numericArray = arrayData.toObjectArray(baseType)
    +        numericArray.map { x =>
    +          baseType.numeric.toDouble(x.asInstanceOf[baseType.InternalType])
    +        }
    +    }
    +
    +  override def checkInputDataTypes(): TypeCheckResult = {
    +    val defaultCheck = super.checkInputDataTypes()
    +    if (defaultCheck.isFailure) {
    +      defaultCheck
    +    } else if (!endpointsExpression.foldable) {
    +      TypeCheckFailure("The intervals provided must be constant literals")
    --- End diff --
    
    I'll check the element type of `endpointsExpression`, thanks.
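    
    For the record, a minimal sketch of how such an element-type check could look, reusing the imports already in this file (the match shape and the message wording are illustrative, not the final implementation):
    
    ```scala
    override def checkInputDataTypes(): TypeCheckResult = {
      val defaultCheck = super.checkInputDataTypes()
      if (defaultCheck.isFailure) {
        defaultCheck
      } else if (!endpointsExpression.foldable) {
        TypeCheckFailure("The intervals provided must be constant literals")
      } else {
        endpointsExpression.dataType match {
          // Only a numeric element type can be converted to Double endpoints.
          case ArrayType(_: NumericType, _) => TypeCheckSuccess
          case _ =>
            TypeCheckFailure("Endpoints require an array of numeric type as argument")
        }
      }
    }
    ```
    
    Matching on `ArrayType(_: NumericType, _)` also guards the `endpoints` conversion above, which assumes a numeric element type when it calls `baseType.numeric.toDouble`.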

