aokolnychyi commented on code in PR #6622:
URL: https://github.com/apache/iceberg/pull/6622#discussion_r1096417802


##########
api/src/main/java/org/apache/iceberg/expressions/AggregateEvaluator.java:
##########
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.expressions;
+
+import java.util.List;
+import java.util.stream.Collectors;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.StructLike;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.types.Types;
+
+public class AggregateEvaluator {

Review Comment:
   What about a short Javadoc explaining the purpose of the class?



##########
api/src/main/java/org/apache/iceberg/expressions/BoundAggregate.java:
##########
@@ -44,4 +57,93 @@ public Type type() {
       return term().type();
     }
   }
+
+  public String columnName() {
+    if (op() == Operation.COUNT_STAR) {
+      return "*";
+    } else {
+      return ref().name();
+    }
+  }
+
+  public boolean aggregateComplexType() {
+    if (op() == Operation.COUNT_STAR) {
+      return false;
+    } else {
+      return ref().type().isNestedType();
+    }
+  }
+
+  public String describe() {
+    switch (op()) {
+      case COUNT_STAR:
+        return "count(*)";
+      case COUNT:
+        return "count(" + ExpressionUtil.describe(term()) + ")";
+      case MAX:
+        return "max(" + ExpressionUtil.describe(term()) + ")";
+      case MIN:
+        return "min(" + ExpressionUtil.describe(term()) + ")";
+      default:
+        throw new UnsupportedOperationException("Unsupported aggregate type: " 
+ op());
+    }
+  }
+
+  <V> V safeGet(Map<Integer, V> map, int key) {
+    return safeGet(map, key, null);
+  }
+
+  <V> V safeGet(Map<Integer, V> map, int key, V defaultValue) {
+    if (map != null) {
+      return map.getOrDefault(key, defaultValue);
+    }
+
+    return null;
+  }
+
+  interface Aggregator<R> {
+    void update(StructLike struct);
+
+    void update(DataFile file);
+
+    R result();
+  }
+
+  abstract static class NullSafeAggregator<T, R> implements Aggregator<R> {

Review Comment:
   I haven't seen the tests yet but I think we want to cover all of 
[these](https://spark.apache.org/docs/3.3.0/sql-ref-null-semantics.html#builtin-aggregate-expressions-)
 scenarios.



##########
api/src/main/java/org/apache/iceberg/expressions/BoundAggregate.java:
##########
@@ -44,4 +57,85 @@ public Type type() {
       return term().type();
     }
   }
+
+  public String columnName() {
+    if (op() == Operation.COUNT_STAR) {
+      return "*";
+    } else {
+      return ref().name();
+    }
+  }
+
+  public String describe() {
+    switch (op()) {
+      case COUNT_STAR:
+        return "count(*)";
+      case COUNT:
+        return "count(" + ExpressionUtil.describe(term()) + ")";
+      case MAX:
+        return "max(" + ExpressionUtil.describe(term()) + ")";
+      case MIN:
+        return "min(" + ExpressionUtil.describe(term()) + ")";
+      default:
+        throw new UnsupportedOperationException("Unsupported aggregate type: " 
+ op());
+    }
+  }
+
+  <V> V safeGet(Map<Integer, V> map, int key) {
+    return safeGet(map, key, null);
+  }
+
+  <V> V safeGet(Map<Integer, V> map, int key, V defaultValue) {
+    if (map != null) {
+      return map.getOrDefault(key, defaultValue);
+    }
+
+    return null;
+  }
+
+  interface Aggregator<R> {
+    void update(StructLike struct);
+
+    void update(DataFile file);
+
+    R result();
+  }
+
+  abstract static class NullSafeAggregator<T, R> implements Aggregator<R> {
+    private final BoundAggregate<T, R> aggregate;
+    private boolean isNull = false;
+
+    NullSafeAggregator(BoundAggregate<T, R> aggregate) {
+      this.aggregate = aggregate;
+    }
+
+    protected abstract void update(R value);
+
+    protected abstract R current();
+
+    @Override
+    public void update(StructLike struct) {
+      if (!isNull) {
+        R value = aggregate.eval(struct);
+        update(value);
+      }
+    }
+
+    @Override
+    public void update(DataFile file) {
+      if (!isNull) {
+        R value = aggregate.eval(file);
+        update(value);

Review Comment:
   I am not sure I understand the purpose of `isNull` in this class then. Looks 
like we init it and never change it?



##########
api/src/main/java/org/apache/iceberg/expressions/AggregateEvaluator.java:
##########
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.expressions;
+
+import java.util.List;
+import java.util.stream.Collectors;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.StructLike;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.types.Types;
+
+public class AggregateEvaluator {
+
+  public static AggregateEvaluator create(List<BoundAggregate<?, ?>> 
aggregates) {
+    return new AggregateEvaluator(aggregates);
+  }
+
+  public static AggregateEvaluator create(Schema schema, List<Expression> 
aggregates) {
+    return create(schema.asStruct(), aggregates);
+  }
+
+  private static AggregateEvaluator create(Types.StructType struct, 
List<Expression> aggregates) {
+    List<BoundAggregate<?, ?>> boundAggregates =
+        aggregates.stream()
+            .map(expr -> Binder.bind(struct, expr))
+            .map(bound -> (BoundAggregate<?, ?>) bound)
+            .collect(Collectors.toList());
+
+    return new AggregateEvaluator(boundAggregates);
+  }
+
+  private final List<BoundAggregate.Aggregator<?>> aggregators;
+  private final Types.StructType resultType;
+  private final List<BoundAggregate<?, ?>> aggregates;
+
+  private AggregateEvaluator(List<BoundAggregate<?, ?>> aggregates) {
+    ImmutableList.Builder<BoundAggregate.Aggregator<?>> aggregatorsBuilder =

Review Comment:
   nit: What about a direct import for `Aggregator` to shorten the lines?
   
   ```
   ImmutableList.Builder<Aggregator<?>> aggregatorsBuilder = 
ImmutableList.builder();
   ```



##########
api/src/main/java/org/apache/iceberg/expressions/AggregateEvaluator.java:
##########
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.expressions;
+
+import java.util.List;
+import java.util.stream.Collectors;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.StructLike;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.types.Types;
+
+public class AggregateEvaluator {
+
+  public static AggregateEvaluator create(List<BoundAggregate<?, ?>> 
aggregates) {
+    return new AggregateEvaluator(aggregates);
+  }
+
+  public static AggregateEvaluator create(Schema schema, List<Expression> 
aggregates) {
+    return create(schema.asStruct(), aggregates);
+  }
+
+  private static AggregateEvaluator create(Types.StructType struct, 
List<Expression> aggregates) {
+    List<BoundAggregate<?, ?>> boundAggregates =
+        aggregates.stream()
+            .map(expr -> Binder.bind(struct, expr))
+            .map(bound -> (BoundAggregate<?, ?>) bound)
+            .collect(Collectors.toList());
+
+    return new AggregateEvaluator(boundAggregates);
+  }
+
+  private final List<BoundAggregate.Aggregator<?>> aggregators;
+  private final Types.StructType resultType;
+  private final List<BoundAggregate<?, ?>> aggregates;
+
+  private AggregateEvaluator(List<BoundAggregate<?, ?>> aggregates) {
+    ImmutableList.Builder<BoundAggregate.Aggregator<?>> aggregatorsBuilder =
+        ImmutableList.builder();
+    List<Types.NestedField> resultFields = Lists.newArrayList();
+    for (int pos = 0; pos < aggregates.size(); pos += 1) {

Review Comment:
   nit: What about an empty line prior to the for loop to separate this block?



##########
spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/SparkReadConf.java:
##########
@@ -243,4 +243,15 @@ public boolean preserveDataGrouping() {
         .defaultValue(SparkSQLProperties.PRESERVE_DATA_GROUPING_DEFAULT)
         .parse();
   }
+
+  public boolean aggregatePushDown() {

Review Comment:
   nit: `aggregatePushDown` -> `aggregatePushDownEnabled`?



##########
api/src/main/java/org/apache/iceberg/expressions/AggregateEvaluator.java:
##########
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.expressions;
+
+import java.util.List;
+import java.util.stream.Collectors;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.StructLike;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.types.Types;
+
+public class AggregateEvaluator {
+
+  public static AggregateEvaluator create(List<BoundAggregate<?, ?>> 
aggregates) {
+    return new AggregateEvaluator(aggregates);
+  }
+
+  public static AggregateEvaluator create(Schema schema, List<Expression> 
aggregates) {
+    return create(schema.asStruct(), aggregates);
+  }
+
+  private static AggregateEvaluator create(Types.StructType struct, 
List<Expression> aggregates) {
+    List<BoundAggregate<?, ?>> boundAggregates =
+        aggregates.stream()
+            .map(expr -> Binder.bind(struct, expr))
+            .map(bound -> (BoundAggregate<?, ?>) bound)
+            .collect(Collectors.toList());
+
+    return new AggregateEvaluator(boundAggregates);
+  }
+
+  private final List<BoundAggregate.Aggregator<?>> aggregators;
+  private final Types.StructType resultType;
+  private final List<BoundAggregate<?, ?>> aggregates;
+
+  private AggregateEvaluator(List<BoundAggregate<?, ?>> aggregates) {
+    ImmutableList.Builder<BoundAggregate.Aggregator<?>> aggregatorsBuilder =
+        ImmutableList.builder();
+    List<Types.NestedField> resultFields = Lists.newArrayList();
+    for (int pos = 0; pos < aggregates.size(); pos += 1) {
+      BoundAggregate<?, ?> aggregate = aggregates.get(pos);
+      aggregatorsBuilder.add(aggregate.newAggregator());
+      resultFields.add(Types.NestedField.optional(pos, aggregate.describe(), 
aggregate.type()));
+    }
+
+    this.aggregators = aggregatorsBuilder.build();
+    this.resultType = Types.StructType.of(resultFields);
+    this.aggregates = aggregates;
+  }
+
+  public void update(StructLike struct) {
+    for (BoundAggregate.Aggregator<?> aggregator : aggregators) {
+      aggregator.update(struct);
+    }
+  }
+
+  public void update(DataFile file) {
+    for (BoundAggregate.Aggregator<?> aggregator : aggregators) {
+      aggregator.update(file);
+    }
+  }
+
+  public Types.StructType resultType() {
+    return resultType;
+  }
+
+  public StructLike resultStruct() {

Review Comment:
   Do we think this will be used in the future? I did not find where it is used 
right now.



##########
spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/SparkReadConf.java:
##########
@@ -243,4 +243,15 @@ public boolean preserveDataGrouping() {
         .defaultValue(SparkSQLProperties.PRESERVE_DATA_GROUPING_DEFAULT)
         .parse();
   }
+
+  public boolean aggregatePushDown() {
+    boolean enable =
+        confParser
+            .booleanConf()
+            .option(SparkReadOptions.AGGREGATE_PUSH_DOWN_ENABLED)
+            .sessionConf(SparkSQLProperties.AGGREGATE_PUSH_DOWN_ENABLED)
+            
.defaultValue(SparkSQLProperties.AGGREGATE_PUSH_DOWN_ENABLED_DEFAULT)
+            .parse();
+    return enable;

Review Comment:
   Do we need this temp var? Why not return directly like in other methods of 
this class?
   
   ```
   return confParser
       .booleanConf()
       .option(SparkReadOptions.AGGREGATE_PUSH_DOWN_ENABLED)
       .sessionConf(SparkSQLProperties.AGGREGATE_PUSH_DOWN_ENABLED)
       .defaultValue(SparkSQLProperties.AGGREGATE_PUSH_DOWN_ENABLED_DEFAULT)
       .parse();
   ```



##########
spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/SparkAggregates.java:
##########
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark.source;
+
+import java.util.Map;
+import org.apache.iceberg.expressions.Expression;
+import org.apache.iceberg.expressions.Expression.Operation;
+import org.apache.iceberg.expressions.Expressions;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
+import org.apache.iceberg.spark.SparkUtil;
+import org.apache.spark.sql.connector.expressions.NamedReference;
+import org.apache.spark.sql.connector.expressions.aggregate.AggregateFunc;
+import org.apache.spark.sql.connector.expressions.aggregate.Count;
+import org.apache.spark.sql.connector.expressions.aggregate.CountStar;
+import org.apache.spark.sql.connector.expressions.aggregate.Max;
+import org.apache.spark.sql.connector.expressions.aggregate.Min;
+
+public class SparkAggregates {
+
+  private SparkAggregates() {}
+
+  private static final Map<Class<? extends AggregateFunc>, Operation> 
AGGREGATES =
+      ImmutableMap.<Class<? extends AggregateFunc>, Operation>builder()
+          .put(Count.class, Operation.COUNT)
+          .put(CountStar.class, Operation.COUNT_STAR)
+          .put(Max.class, Operation.MAX)
+          .put(Min.class, Operation.MIN)
+          .build();
+
+  public static Expression convert(AggregateFunc aggregate) {
+    Operation op = AGGREGATES.get(aggregate.getClass());
+    if (op != null) {
+      switch (op) {
+        case COUNT:
+          Count countAgg = (Count) aggregate;
+          assert (countAgg.column() instanceof NamedReference);
+          return Expressions.count(SparkUtil.toColumnName((NamedReference) 
countAgg.column()));
+        case COUNT_STAR:
+          return Expressions.countStar();
+        case MAX:
+          Max maxAgg = (Max) aggregate;
+          assert (maxAgg.column() instanceof NamedReference);
+          return Expressions.max(SparkUtil.toColumnName((NamedReference) 
maxAgg.column()));
+        case MIN:
+          Min minAgg = (Min) aggregate;
+          assert (minAgg.column() instanceof NamedReference);
+          return Expressions.min(SparkUtil.toColumnName((NamedReference) 
minAgg.column()));
+      }
+    }
+    throw new UnsupportedOperationException("Invalid aggregate: " + aggregate);

Review Comment:
   Hm, is it a good idea to throw an exception? Would returning null be better?



##########
spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/SparkReadOptions.java:
##########
@@ -90,4 +90,6 @@ private SparkReadOptions() {}
   public static final String VERSION_AS_OF = "versionAsOf";
 
   public static final String TIMESTAMP_AS_OF = "timestampAsOf";
+
+  public static final String AGGREGATE_PUSH_DOWN_ENABLED = 
"aggregatePushDownEnabled";

Review Comment:
   +1



##########
spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/SparkSQLProperties.java:
##########
@@ -47,4 +47,8 @@ private SparkSQLProperties() {}
   public static final String PRESERVE_DATA_GROUPING =
       "spark.sql.iceberg.planning.preserve-data-grouping";
   public static final boolean PRESERVE_DATA_GROUPING_DEFAULT = false;
+
+  // Controls whether to push down aggregate (MAX/MIN/COUNT) to Iceberg
+  public static final String AGGREGATE_PUSH_DOWN_ENABLED = 
"spark.sql.iceberg.aggregate_pushdown";

Review Comment:
   +1



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org
For additional commands, e-mail: issues-h...@iceberg.apache.org

Reply via email to