cboumalh commented on code in PR #51298: URL: https://github.com/apache/spark/pull/51298#discussion_r2331752912
##########
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/thetasketchesAggregates.scala:
##########
@@ -0,0 +1,662 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.catalyst.expressions.aggregate
+
+import org.apache.datasketches.memory.Memory
+import org.apache.datasketches.theta.{CompactSketch, Intersection, SetOperation, Sketch, Union, UpdateSketch, UpdateSketchBuilder}
+
+import org.apache.spark.SparkUnsupportedOperationException
+import org.apache.spark.sql.catalyst.InternalRow
+import org.apache.spark.sql.catalyst.expressions.{ExpectsInputTypes, Expression, ExpressionDescription, Literal}
+import org.apache.spark.sql.catalyst.expressions.aggregate.TypedImperativeAggregate
+import org.apache.spark.sql.catalyst.trees.BinaryLike
+import org.apache.spark.sql.catalyst.util.{ArrayData, CollationFactory, ThetaSketchUtils}
+import org.apache.spark.sql.errors.QueryExecutionErrors
+import org.apache.spark.sql.internal.types.StringTypeWithCollation
+import org.apache.spark.sql.types.{AbstractDataType, ArrayType, BinaryType, DataType, DoubleType, FloatType, IntegerType, LongType, StringType, TypeCollection}
+import org.apache.spark.unsafe.types.UTF8String
+
+sealed trait ThetaSketchState {
+  def serialize(): Array[Byte]
+  def eval(): Array[Byte]
+}
+case class UpdatableSketchBuffer(sketch: UpdateSketch) extends ThetaSketchState {
+  override def serialize(): Array[Byte] = sketch.rebuild.compact.toByteArrayCompressed
+  override def eval(): Array[Byte] = sketch.rebuild.compact.toByteArrayCompressed
+}
+case class UnionAggregationBuffer(union: Union) extends ThetaSketchState {
+  override def serialize(): Array[Byte] = union.getResult.toByteArrayCompressed
+  override def eval(): Array[Byte] = union.getResult.toByteArrayCompressed
+}
+case class IntersectionAggregationBuffer(intersection: Intersection) extends ThetaSketchState {
+  override def serialize(): Array[Byte] = intersection.getResult.toByteArrayCompressed
+  override def eval(): Array[Byte] = intersection.getResult.toByteArrayCompressed
+}
+case class FinalizedSketch(sketch: CompactSketch) extends ThetaSketchState {
+  override def serialize(): Array[Byte] = sketch.toByteArrayCompressed
+  override def eval(): Array[Byte] = sketch.toByteArrayCompressed
+}
+
+/**
+ * The ThetaSketchAgg function utilizes a Datasketches ThetaSketch instance to count a
+ * probabilistic approximation of the number of unique values in a given column, and outputs the
+ * binary representation of the ThetaSketch.
+ *
+ * See [[https://datasketches.apache.org/docs/Theta/ThetaSketches.html]] for more information.
+ *
+ * @param left
+ *   child expression against which unique counting will occur
+ * @param right
+ *   the log-base-2 of nomEntries decides the number of buckets for the sketch
+ * @param mutableAggBufferOffset
+ *   offset for mutable aggregation buffer
+ * @param inputAggBufferOffset
+ *   offset for input aggregation buffer
+ */
+// scalastyle:off line.size.limit
+@ExpressionDescription(
+  usage = """
+    _FUNC_(expr, lgNomEntries) - Returns the ThetaSketch compact binary representation.
+      `lgNomEntries` (optional) is the log-base-2 of nominal entries, with nominal entries deciding
+      the number of buckets or slots for the ThetaSketch. """,
+  examples = """
+    Examples:
+      > SELECT theta_sketch_estimate(_FUNC_(col, 12)) FROM VALUES (1), (1), (2), (2), (3) tab(col);
+       3
+  """,
+  group = "agg_funcs",
+  since = "4.1.0")
+// scalastyle:on line.size.limit
+case class ThetaSketchAgg(
+    left: Expression,
+    right: Expression,
+    override val mutableAggBufferOffset: Int,
+    override val inputAggBufferOffset: Int)
+  extends TypedImperativeAggregate[ThetaSketchState]
+  with BinaryLike[Expression]
+  with ExpectsInputTypes {
+
+  // ThetaSketch config - mark as lazy so that they're not evaluated during tree transformation.
+
+  lazy val lgNomEntries: Int = {
+    val lgNomEntriesInput = right.eval().asInstanceOf[Int]
+    ThetaSketchUtils.checkLgNomLongs(lgNomEntriesInput, prettyName)
+    lgNomEntriesInput
+  }
+
+  // Constructors
+
+  def this(child: Expression) = {
+    this(child, Literal(ThetaSketchUtils.DEFAULT_LG_NOM_LONGS), 0, 0)
+  }
+
+  def this(child: Expression, lgNomEntries: Expression) = {
+    this(child, lgNomEntries, 0, 0)
+  }
+
+  def this(child: Expression, lgNomEntries: Int) = {
+    this(child, Literal(lgNomEntries), 0, 0)
+  }
+
+  // Copy constructors required by ImperativeAggregate
+
+  override def withNewMutableAggBufferOffset(newMutableAggBufferOffset: Int): ThetaSketchAgg =
+    copy(mutableAggBufferOffset = newMutableAggBufferOffset)
+
+  override def withNewInputAggBufferOffset(newInputAggBufferOffset: Int): ThetaSketchAgg =
+    copy(inputAggBufferOffset = newInputAggBufferOffset)
+
+  override protected def withNewChildrenInternal(
+      newLeft: Expression,
+      newRight: Expression): ThetaSketchAgg =
+    copy(left = newLeft, right = newRight)
+
+  // Overrides for TypedImperativeAggregate
+
+  override def prettyName: String = "theta_sketch_agg"
+
+  override def inputTypes: Seq[AbstractDataType] =
+    Seq(
+      TypeCollection(
+        ArrayType(IntegerType),
+        ArrayType(LongType),
+        BinaryType,
+        DoubleType,
+        FloatType,
+        IntegerType,
+        LongType,
+        StringTypeWithCollation(supportsTrimCollation = true)),
+      IntegerType)
+
+  override def dataType: DataType = BinaryType
+
+  override def nullable: Boolean = false
+
+  /**
+   * Instantiate an UpdateSketch instance using the lgNomEntries param.
+   *
+   * @return
+   *   an UpdateSketch instance wrapped with UpdatableSketchBuffer
+   */
+  override def createAggregationBuffer(): ThetaSketchState = {
+    val builder = new UpdateSketchBuilder
+    builder.setLogNominalEntries(lgNomEntries)
+    UpdatableSketchBuffer(builder.build)
+  }
+
+  /**
+   * Evaluate the input row and update the UpdateSketch instance with the row's value. The update
+   * function only supports a subset of Spark SQL types, and an exception will be thrown for
+   * unsupported types.
+   *
+   * @param updateBuffer
+   *   A previously initialized UpdateSketch instance
+   * @param input
+   *   An input row
+   */
+  override def update(updateBuffer: ThetaSketchState, input: InternalRow): ThetaSketchState = {
+    // Return early for null values.
+    val v = left.eval(input)
+    if (v == null) return updateBuffer
+
+    // Initialized buffer should be UpdatableSketchBuffer, else error out.
+    val sketch = updateBuffer match {
+      case UpdatableSketchBuffer(s) => s
+      case _ => throw QueryExecutionErrors.thetaInvalidInputSketchBuffer(prettyName)
+    }
+
+    // Handle the different data types for sketch updates.
+    left.dataType match {
+      case ArrayType(IntegerType, _) =>
+        val arr = v.asInstanceOf[ArrayData].toIntArray()
+        if (arr.nonEmpty) sketch.update(arr)
+      case ArrayType(LongType, _) =>
+        val arr = v.asInstanceOf[ArrayData].toLongArray()
+        if (arr.nonEmpty) sketch.update(arr)
+      case BinaryType =>
+        val bytes = v.asInstanceOf[Array[Byte]]
+        if (bytes.nonEmpty) sketch.update(bytes)
+      case DoubleType =>
+        sketch.update(v.asInstanceOf[Double])
+      case FloatType =>
+        sketch.update(v.asInstanceOf[Float].toDouble) // Float is promoted to double.
+      case IntegerType =>
+        sketch.update(v.asInstanceOf[Int].toLong) // Int is promoted to Long.
+      case LongType =>
+        sketch.update(v.asInstanceOf[Long])
+      case st: StringType =>
+        val cKey =
+          CollationFactory.getCollationKey(v.asInstanceOf[UTF8String], st.collationId)
+        sketch.update(cKey.toString)

Review Comment:
   I'm of the opinion that we should keep the implementation as is. I think ignoring empty arrays, empty byte arrays, and empty strings makes the most sense, and it is what the DataSketches library does under the hood. It is more intuitive for theta sketch users and is probably what they are used to from whatever implementations they use today (UDFs, UDAFs). If we are aggregating household IDs from a dataset, for example, we don't want the intersection of two household datasets to come out at 3000 when in reality it's all empty strings. That would be misleading.

   Also, I'd argue against serializing sketches with extra metadata, because that would make the data useless outside the Spark ecosystem. A huge feature of DataSketches is how portable the sketches are across different languages and storage systems.
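   To make the empty-input point concrete, here is a minimal, standalone sketch against the DataSketches Java API (`org.apache.datasketches.theta`, used directly rather than through this PR); the household IDs and `lgNomEntries = 12` (the value from the SQL example above) are illustrative only:

   ```scala
   import org.apache.datasketches.theta.UpdateSketchBuilder

   object EmptyInputDemo {
     def main(args: Array[String]): Unit = {
       // Build an update sketch with lgNomEntries = 12, i.e. 4096 nominal entries.
       val sketch = new UpdateSketchBuilder().setLogNominalEntries(12).build()

       // The library itself ignores null/empty string updates, so empty values
       // never inflate the distinct count -- the behavior this PR keeps.
       sketch.update("")        // no-op: empty string is rejected by the library
       sketch.update("hh-001")  // hypothetical household id
       sketch.update("hh-002")
       sketch.update("hh-002")  // duplicate, counted once

       println(sketch.getEstimate) // ~2.0: only the two distinct non-empty ids count
     }
   }
   ```

   And since the aggregate emits the plain compact Theta image (via `toByteArrayCompressed` above) with no Spark-specific wrapper, the resulting bytes should stay readable by DataSketches implementations outside Spark (subject to their support for the compressed compact format), which is the portability point made above.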