Github user setjet commented on a diff in the pull request:

    https://github.com/apache/spark/pull/18113#discussion_r155070641
  
    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/typedaggregators.scala ---
    @@ -99,3 +96,165 @@ class TypedAverage[IN](val f: IN => Double) extends Aggregator[IN, (Double, Long
         toColumn.asInstanceOf[TypedColumn[IN, java.lang.Double]]
       }
     }
    +
    +class TypedMinDouble[IN](val f: IN => Double)
    +  extends Aggregator[IN, MutableDouble, java.lang.Double] {
    +  override def zero: MutableDouble = null
    +  override def reduce(b: MutableDouble, a: IN): MutableDouble = {
    +    if (b == null) {
    +      new MutableDouble(f(a))
    +    } else {
    +      b.value = math.min(b.value, f(a))
    +      b
    +    }
    +  }
    +  override def merge(b1: MutableDouble, b2: MutableDouble): MutableDouble = {
    +    if (b1 == null) {
    +      b2
    +    } else if (b2 == null) {
    +      b1
    +    } else {
    +      b1.value = math.min(b1.value, b2.value)
    +      b1
    +    }
    +  }
    +  override def finish(reduction: MutableDouble): java.lang.Double = {
    +    if (reduction == null) {
    +      null
    +    } else {
    +      reduction.toJavaDouble
    +    }
    +  }
    +
    +  override def bufferEncoder: Encoder[MutableDouble] = Encoders.kryo[MutableDouble]
    +  override def outputEncoder: Encoder[java.lang.Double] = ExpressionEncoder[java.lang.Double]()
    +
    +  // Java api support
    +  def this(f: MapFunction[IN, java.lang.Double]) = this((x: IN) => f.call(x))
    +  def toColumnScala: TypedColumn[IN, Double] = {
    --- End diff --
    
    @cloud-fan I agree that's the best option. I made some slight changes, but it's implemented now.
    There is one issue, however, that I am stuck on: the tests for empty sets ("typed aggregate: empty") seem to decode the options to nulls, resulting in the following:
    
    Decoded objects do not match expected objects:
    expected: WrappedArray([0.0,0,NaN,None,None,None,None])
    actual:   WrappedArray([0.0,0,NaN,[null],[null],[null],[null]])
    
    This doesn't happen with non-empty datasets. Do you have any clue?
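
    For reference, this is the kind of standalone sketch I've been using to poke at the empty-input behaviour (a rough repro, assuming TypedMinDouble as in this diff; the spark-shell style setup and the name minCol are just illustrative, not part of the PR):

        import org.apache.spark.sql.SparkSession
        import org.apache.spark.sql.execution.aggregate.TypedMinDouble

        val spark = SparkSession.builder().master("local[1]").appName("min-empty-repro").getOrCreate()
        import spark.implicits._

        // Column built from the aggregator in this diff; its output type is java.lang.Double.
        val minCol = new TypedMinDouble[Double]((d: Double) => d).toColumn

        // Non-empty input reduces as expected (should give the minimum, 1.0).
        println(Seq(3.0, 1.0, 2.0).toDS().select(minCol).collect().toSeq)

        // Empty input never calls reduce, so finish(null) should yield a single null row here.
        // It is the Option-wrapped path that the empty-set test decodes as [null] instead of None.
        println(spark.emptyDataset[Double].select(minCol).collect().toSeq)

        spark.stop()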



---

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
