GitHub user cloud-fan commented on a diff in the pull request:

    https://github.com/apache/spark/pull/15544#discussion_r139190161
  
    --- Diff: sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/ApproxCountDistinctForIntervals.scala ---
    @@ -0,0 +1,248 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *    http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.spark.sql.catalyst.expressions.aggregate
    +
    +import java.util
    +
    +import org.apache.spark.sql.catalyst.InternalRow
    +import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
    +import org.apache.spark.sql.catalyst.analysis.TypeCheckResult.{TypeCheckFailure, TypeCheckSuccess}
    +import org.apache.spark.sql.catalyst.expressions.{AttributeReference, ExpectsInputTypes, Expression, ExpressionDescription}
    +import org.apache.spark.sql.catalyst.util.{ArrayData, GenericArrayData, HyperLogLogPlusPlusHelper}
    +import org.apache.spark.sql.types._
    +
    +/**
    + * This function counts the approximate number of distinct values (ndv) in
    + * intervals constructed from the endpoints specified in `endpointsExpression`. The endpoints
    + * are sorted into ascending order, and the ndv in each interval is estimated with the
    + * HyperLogLogPlusPlus algorithm.
    + * @param child the expression to estimate the ndv's of.
    + * @param endpointsExpression the array expression providing endpoints to construct the intervals.
    + * @param relativeSD the maximum estimation error allowed in the HyperLogLogPlusPlus algorithm.
    + */
    +@ExpressionDescription(
    +  usage = """
    +    _FUNC_(col, array(endpoint_1, endpoint_2, ... endpoint_N)) - Returns the approximate
    +      number of distinct values (ndv) for intervals [endpoint_1, endpoint_2],
    +      (endpoint_2, endpoint_3], ... (endpoint_N-1, endpoint_N].
    +
    +    _FUNC_(col, array(endpoint_1, endpoint_2, ... endpoint_N), relativeSD=0.05) - Returns
    +      the approximate number of distinct values (ndv) for intervals with relativeSD, the maximum
    +      estimation error allowed in the HyperLogLogPlusPlus algorithm.
    +  """,
    +  extended = """
    +    Examples:
    +      > SELECT approx_count_distinct_for_intervals(10.0, array(5, 15, 25), 0.01);
    +       [1, 0]
    +  """)
    +case class ApproxCountDistinctForIntervals(
    +    child: Expression,
    +    endpointsExpression: Expression,
    +    relativeSD: Double = 0.05,
    +    mutableAggBufferOffset: Int = 0,
    +    inputAggBufferOffset: Int = 0)
    +  extends ImperativeAggregate with ExpectsInputTypes {
    +
    +  def this(child: Expression, endpointsExpression: Expression) = {
    +    this(
    +      child = child,
    +      endpointsExpression = endpointsExpression,
    +      relativeSD = 0.05,
    +      mutableAggBufferOffset = 0,
    +      inputAggBufferOffset = 0)
    +  }
    +
    +  def this(child: Expression, endpointsExpression: Expression, relativeSD: Expression) = {
    +    this(
    +      child = child,
    +      endpointsExpression = endpointsExpression,
    +      relativeSD = HyperLogLogPlusPlus.validateDoubleLiteral(relativeSD),
    +      mutableAggBufferOffset = 0,
    +      inputAggBufferOffset = 0)
    +  }
    +
    +  override def inputTypes: Seq[AbstractDataType] = {
    +    Seq(TypeCollection(NumericType, TimestampType, DateType), ArrayType)
    +  }
    +
    +  // Mark as lazy so that endpointsExpression is not evaluated during tree transformation.
    +  lazy val endpoints: Array[Double] = {
    +    val doubleArray = (endpointsExpression.dataType, endpointsExpression.eval()) match {
    +      case (ArrayType(baseType: NumericType, _), arrayData: ArrayData) =>
    +        val numericArray = arrayData.toObjectArray(baseType)
    +        numericArray.map { x =>
    +          baseType.numeric.toDouble(x.asInstanceOf[baseType.InternalType])
    +        }
    +    }
    +    util.Arrays.sort(doubleArray)
    --- End diff ---
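    
    For illustration, not part of the diff: the usage string's example returns [1, 0] for
    value 10.0 with endpoints (5, 15, 25) because 10.0 falls in the first interval [5, 15]
    and nothing falls in (15, 25]. A minimal, self-contained sketch of that bucketing rule
    (all names here are hypothetical, not the PR's code):
    
        object IntervalSemantics {
          // Endpoints (e_1 .. e_N) define intervals [e_1, e_2], (e_2, e_3], ..., (e_N-1, e_N].
          // Returns the zero-based interval index for a value, or None if it is out of range.
          def intervalIndex(endpoints: Array[Double], value: Double): Option[Int] = {
            if (value < endpoints.head || value > endpoints.last) None
            else if (value == endpoints.head) Some(0) // the left edge belongs to the first interval
            else Some(endpoints.indexWhere(value <= _) - 1) // first endpoint >= value bounds the interval
          }
    
          def main(args: Array[String]): Unit = {
            val endpoints = Array(5.0, 15.0, 25.0)
            println(intervalIndex(endpoints, 10.0)) // Some(0): in [5, 15], matching the [1, 0] example
            println(intervalIndex(endpoints, 20.0)) // Some(1): in (15, 25]
            println(intervalIndex(endpoints, 30.0)) // None: out of range
          }
        }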
    
    again, if it's only used internally, we can require the caller to pass the endpoints already sorted.
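    
    For illustration, a minimal self-contained sketch of that suggestion: replace the
    util.Arrays.sort(doubleArray) call above with a precondition that fails fast on
    unsorted input (names here are hypothetical, not the PR's code):
    
        object SortedEndpointsCheck {
          // Validate that caller-supplied endpoints are already ascending instead of sorting them.
          def requireSorted(endpoints: Array[Double]): Array[Double] = {
            require(endpoints.length >= 2, s"need at least two endpoints, got ${endpoints.length}")
            require(endpoints.sliding(2).forall(pair => pair(0) <= pair(1)),
              "endpoints must be sorted in ascending order")
            endpoints
          }
    
          def main(args: Array[String]): Unit = {
            println(requireSorted(Array(5.0, 15.0, 25.0)).mkString(", ")) // ok: 5.0, 15.0, 25.0
            // requireSorted(Array(25.0, 5.0)) // would throw IllegalArgumentException
          }
        }
    
    The trade-off: sorting inside the aggregate is more forgiving for ad-hoc use, while a
    require keeps the internal contract explicit and avoids silently reordering input.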

