gengliangwang commented on code in PR #54459:
URL: https://github.com/apache/spark/pull/54459#discussion_r2881840489
##########
sql/catalyst/src/main/scala/org/apache/spark/sql/internal/connector/PartitionPredicateImpl.scala:
##########
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.internal.connector
+
+import org.apache.spark.internal.Logging
+import org.apache.spark.sql.catalyst.InternalRow
+import org.apache.spark.sql.catalyst.expressions._
+import org.apache.spark.sql.connector.expressions.filter.PartitionPredicate
+
+/**
+ * An implementation for [[PartitionPredicate]] that wraps a Catalyst Expression representing a
+ * partition filter.
+ * <p>
+ * Supporting data sources receive these via
+ * [[org.apache.spark.sql.connector.read.SupportsPushDownV2Filters#pushPredicates pushPredicates]]
+ * and may use them for additional partition filtering.
+ */
+class PartitionPredicateImpl(
+    private val catalystExpression: Expression,
+    private val partitionSchema: Seq[AttributeReference])
+  extends PartitionPredicate(
+    PartitionPredicate.NAME,
+    org.apache.spark.sql.connector.expressions.Expression.EMPTY_EXPRESSION) with Logging {
+
+  /** The wrapped partition filter Catalyst Expression. */
+  def expression: Expression = catalystExpression
+
+  override def toString(): String =
+    s"PartitionPredicate(${catalystExpression.sql})"
+
+  override def accept(partitionValues: InternalRow): Boolean = {
+    // defensive checks
+    if (partitionSchema.isEmpty) {
+      logWarning(s"Cannot evaluate partition predicate ${catalystExpression.sql}: " +
+        s"partition schema is empty, including partition")
+      return true
+    }
+    if (partitionValues.numFields != partitionSchema.length) {
+      logWarning(s"Cannot evaluate partition predicate ${catalystExpression.sql}: " +
+        s"partition value field count (${partitionValues.numFields}) does not match schema " +
+        s"(${partitionSchema.length}), including partition")
+      return true
+    }
+    val refNames = catalystExpression.references.map(_.name).toSet
+    val partitionNames = partitionSchema.map(_.name).toSet
+    if (!refNames.subsetOf(partitionNames)) {
+      logWarning(s"Cannot evaluate partition predicate ${catalystExpression.sql}: " +
+        s"expression references ${refNames.mkString(", ")} not all in partition columns " +
+        s"${partitionNames.mkString(", ")}, including partition")
+      return true
+    }
+
+    // evaluate the catalyst partition filter expression
+    try {
+      val boundExpr = catalystExpression.transform {
+        case a: AttributeReference =>
+          val index = partitionSchema.indexWhere(_.name == a.name)

Review Comment:
**Case-sensitive column name matching.** Both `accept()` (here) and `referencedPartitionColumnOrdinals()` (line 88) use case-sensitive `==` for column name comparison. Spark defaults to case-insensitive column resolution (`spark.sql.caseSensitive` defaults to `false`).
If column names differ in casing between the expression and the partition schema, `indexWhere` returns `-1`, and `accept()` would hit an `ArrayIndexOutOfBoundsException` (caught by the generic `catch` block, silently returning `true`). Consider using case-insensitive comparison, e.g.:

```scala
val index = partitionSchema.indexWhere(_.name.equalsIgnoreCase(a.name))
```

or using the resolver from `SQLConf`.
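A minimal sketch of the resolver-based variant, assuming the binding logic is factored into a helper (the name `bindToPartitionSchema` is hypothetical, not part of this PR):

```scala
import org.apache.spark.sql.catalyst.expressions.{AttributeReference, BoundReference, Expression}
import org.apache.spark.sql.internal.SQLConf

// Hypothetical helper: bind attribute references against the partition schema
// using the session resolver, so name matching honors spark.sql.caseSensitive.
def bindToPartitionSchema(
    expr: Expression,
    partitionSchema: Seq[AttributeReference]): Expression = {
  val resolver = SQLConf.get.resolver // (String, String) => Boolean
  expr.transform {
    case a: AttributeReference =>
      val index = partitionSchema.indexWhere(f => resolver(f.name, a.name))
      // Fail loudly instead of producing BoundReference(-1, ...)
      require(index >= 0, s"Partition column not found: ${a.name}")
      BoundReference(index, a.dataType, a.nullable)
  }
}
```

This also sidesteps the silent-`true` path: an unmatched name fails the binding step up front rather than surfacing as an `ArrayIndexOutOfBoundsException` inside evaluation.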
##########
sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2EnhancedPartitionFilterSuite.scala:
##########
@@ -0,0 +1,288 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.connector
+
+import java.util.Locale
+
+import org.scalatest.BeforeAndAfter
+
+import org.apache.spark.sql.{QueryTest, Row}
+import org.apache.spark.sql.connector.catalog.BufferedRows
+import org.apache.spark.sql.connector.catalog.InMemoryTableEnhancedPartitionFilterCatalog
+import org.apache.spark.sql.connector.catalog.TestPartitionPredicateScan
+import org.apache.spark.sql.connector.expressions.filter.PartitionPredicate
+import org.apache.spark.sql.execution.datasources.v2.BatchScanExec
+import org.apache.spark.sql.test.SharedSparkSession
+
+/**
+ * Tests for enhanced partition filter pushdown with tables whose scan builder handles
+ * PartitionPredicates in a second pass of partition filter pushdown, for those
+ * Catalyst Expression filters that are not translatable to DSV2, or are returned by DSV2
+ * in the first pushdown.
+ */
+class DataSourceV2EnhancedPartitionFilterSuite
+  extends QueryTest with SharedSparkSession with BeforeAndAfter {
+
+  protected val v2Source = classOf[FakeV2ProviderWithCustomSchema].getName
+  protected val partFilterTableName = "testpartfilter.t"
+
+  protected def registerCatalog(name: String, clazz: Class[_]): Unit = {
+    spark.conf.set(s"spark.sql.catalog.$name", clazz.getName)
+  }
+
+  before {
+    registerCatalog("testpartfilter", classOf[InMemoryTableEnhancedPartitionFilterCatalog])
+  }
+
+  after {
+    spark.sessionState.catalogManager.reset()
+  }
+
+  private def getBatchScan(df: org.apache.spark.sql.DataFrame): BatchScanExec = {
+    df.queryExecution.executedPlan.collectFirst {
+      case b: BatchScanExec => b
+    }.getOrElse(fail("Expected BatchScanExec in plan"))
+  }
+
+  /** Number of input partitions in the executed plan. */
+  private def getInputPartitionCount(df: org.apache.spark.sql.DataFrame): Int = {
+    getBatchScan(df).batch.planInputPartitions().length
+  }
+
+  /**
+   * Collects pushed partition predicates from the plan when the scan is our
+   * test in-memory scan.
+   */
+  private def getPushedPartitionPredicates(
+      df: org.apache.spark.sql.DataFrame): Seq[PartitionPredicate] = {
+    getBatchScan(df).batch match {
+      case s: TestPartitionPredicateScan => s.getPushedPartitionPredicates
+      case _ => Seq.empty
+    }
+  }
+
+  private def assertPushedPartitionPredicateOrdinals(
+      df: org.apache.spark.sql.DataFrame,
+      expected: Array[Int]): Unit = {
+    getPushedPartitionPredicates(df).foreach { p =>
+      assert(p.referencedPartitionColumnOrdinals().sameElements(expected))
+    }
+  }
+
+  /**
+   * Asserts the number of pushed partition predicates and that each has the given
+   * referencedPartitionColumnOrdinals.
+   */
+  private def assertPushedPartitionPredicates(
+      df: org.apache.spark.sql.DataFrame,
+      expectedCount: Int,
+      expectedOrdinals: Array[Int]): Unit = {
+    val predicates = getPushedPartitionPredicates(df)
+    assert(predicates.length === expectedCount)
+    assertPushedPartitionPredicateOrdinals(df, expectedOrdinals)
+  }
+
+  test("first pass partition filter still works (e.g. part_col = value)") {
+    withTable(partFilterTableName) {
+      sql(s"CREATE TABLE $partFilterTableName (part_col string, data string) USING $v2Source " +
+        "PARTITIONED BY (part_col)")
+      sql(s"INSERT INTO $partFilterTableName VALUES ('a', 'x'), ('b', 'y'), ('c', 'z')")
+
+      // Simple partition equality is pushed in the first pass and used to prune partitions
+      val df = sql(s"SELECT * FROM $partFilterTableName WHERE part_col = 'b'")
+      checkAnswer(df, Seq(Row("b", "y")))
+
+      assert(getInputPartitionCount(df) === 1,
+        "First-pass pushed predicate (part_col = 'b') should prune to one partition")
+      val partitions = getBatchScan(df).batch.planInputPartitions()
+      assert(partitions.head.asInstanceOf[BufferedRows].keyString() === "b")
+      assertPushedPartitionPredicates(df, expectedCount = 1, Array(0))

Review Comment:
**Test assertion likely has incorrect `expectedCount`.** The PR notes "Tests were generated by Cursor." For `part_col = 'b'`, this is an equality filter that IS translatable to a V2 predicate AND accepted by `InMemoryTableWithV2Filter.supportsPredicates` (which supports `=`) in the first pass. Since the source fully handles it in the first pass, no `PartitionPredicate` is created in the second pass. The expected count should be **0**, not 1. Similar issues in other tests:

- **"untranslatable partition-only expression handled by second pass (e.g. LIKE)"** (line 134): `expectedCount = 2` but LIKE is a single expression producing one `PartitionPredicate`. Should be **1**.
- **"two partition predicates pushed (e.g. LIKE and IS NOT NULL)"** (line 285): `expectedCount = 2` but `IS NOT NULL` is translatable and accepted in the first pass. Only LIKE goes to the second pass. Should be **1**.
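If those counts are right, the corrected calls would look like the following sketch, using the suite's own `assertPushedPartitionPredicates` helper (the exact fix is of course the author's call):

```scala
// part_col = 'b' is fully handled in the first pass, so no second-pass predicate:
assertPushedPartitionPredicates(df, expectedCount = 0, expectedOrdinals = Array.empty[Int])
// The LIKE-only and LIKE + IS NOT NULL cases each leave one predicate for the second pass:
assertPushedPartitionPredicates(df, expectedCount = 1, expectedOrdinals = Array(0))
```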
Please verify by running: `build/sbt "sql/testOnly *DataSourceV2EnhancedPartitionFilterSuite"`

##########
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/PushDownUtils.scala:
##########
@@ -22,24 +22,66 @@
 import scala.collection.mutable

 import org.apache.spark.sql.catalyst.expressions.{AttributeReference, AttributeSet, Expression, NamedExpression, SchemaPruning}
 import org.apache.spark.sql.catalyst.types.DataTypeUtils.toAttributes
 import org.apache.spark.sql.catalyst.util.CharVarcharUtils
+import org.apache.spark.sql.connector.expressions.IdentityTransform
 import org.apache.spark.sql.connector.expressions.SortOrder
-import org.apache.spark.sql.connector.expressions.filter.Predicate
+import org.apache.spark.sql.connector.expressions.filter.{PartitionPredicate, Predicate}
 import org.apache.spark.sql.connector.read.{Scan, ScanBuilder, SupportsPushDownFilters, SupportsPushDownLimit, SupportsPushDownOffset, SupportsPushDownRequiredColumns, SupportsPushDownTableSample, SupportsPushDownTopN, SupportsPushDownV2Filters}
-import org.apache.spark.sql.execution.datasources.DataSourceStrategy
+import org.apache.spark.sql.execution.datasources.{DataSourceStrategy, DataSourceUtils}
 import org.apache.spark.sql.internal.SQLConf
-import org.apache.spark.sql.internal.connector.SupportsPushDownCatalystFilters
+import org.apache.spark.sql.internal.connector.{PartitionPredicateImpl, SupportsPushDownCatalystFilters}
 import org.apache.spark.sql.sources
-import org.apache.spark.sql.types.StructType
+import org.apache.spark.sql.types.{StructField, StructType}
 import org.apache.spark.util.ArrayImplicits._
 import org.apache.spark.util.collection.Utils

 object PushDownUtils {
+
+  /**
+   * Returns the table's partition schema as a StructType, when the partitioning is supported.
+   * Currently only supported for identity transforms on simple (single-name) field references.
+   *
+   * @return Some(StructType) if the partitioning is supported, None otherwise.
+   */
+  def getPartitionSchemaForPartitionPredicate(
+      relation: DataSourceV2Relation): Option[StructType] = {
+    val partitioning = relation.table.partitioning().toIndexedSeq
+    val partitionColNamesOpt: Seq[Option[String]] = partitioning.map {
+      case id: IdentityTransform =>
+        id.ref.fieldNames().toIndexedSeq match {
+          case Seq(name) => Some(name)
+          case _ => None // Not supported for multiple field names (e.g. nested field)
+        }
+      case _ => None
+    }
+    partitionColNamesOpt match {
+      // Only support identity transform on simple field reference
+      case seq if seq.isEmpty || seq.exists(_.isEmpty) => None

Review Comment:
**All-or-nothing identity transform check is overly conservative.** If *any* partition transform is non-identity (e.g. `bucket`, `years`), this returns `None` and disables enhanced partition filtering entirely — even for filters that only reference identity-transform columns. For example, a table with `PARTITIONED BY (identity(region), bucket(32, id))` and a query `WHERE region = 'us'` would get no benefit, even though `region` is a simple identity partition column. Consider extracting partition schema for the identity-transform columns only, so filters on those columns can still be pushed as `PartitionPredicate`s.
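A rough sketch of that relaxation (the helper name `identityPartitionSchema` is illustrative, not the PR's API, and the ordinal bookkeeping for the narrowed schema is left out):

```scala
import org.apache.spark.sql.connector.expressions.{IdentityTransform, Transform}
import org.apache.spark.sql.types.{StructField, StructType}

// Hypothetical variant: keep the identity-transform columns instead of bailing
// out when any transform is non-identity.
def identityPartitionSchema(
    partitioning: Seq[Transform],
    tableSchema: StructType): Option[StructType] = {
  val identityCols = partitioning.collect {
    case id: IdentityTransform if id.ref.fieldNames().length == 1 =>
      id.ref.fieldNames().head
  }
  // Name matching here should honor spark.sql.caseSensitive, per the earlier comment.
  val fields: Seq[StructField] =
    identityCols.flatMap(name => tableSchema.fields.find(_.name == name))
  if (fields.isEmpty) None else Some(StructType(fields))
}
```

Filters referencing only those columns could then still be wrapped as `PartitionPredicate`s, while `bucket`/`years` columns are simply excluded from the second pass.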
##########
sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/InMemoryEnhancedPartitionFilterTable.scala:
##########
@@ -0,0 +1,136 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.connector.catalog
+
+import java.util
+
+import scala.collection.mutable.{ArrayBuffer, Buffer}
+
+import org.apache.spark.sql.connector.catalog.CatalogV2Implicits.MultipartIdentifierHelper
+import org.apache.spark.sql.connector.expressions.Transform
+import org.apache.spark.sql.connector.expressions.filter.PartitionPredicate
+import org.apache.spark.sql.connector.expressions.filter.Predicate
+import org.apache.spark.sql.connector.read.{InputPartition, Scan, ScanBuilder, SupportsPushDownRequiredColumns, SupportsPushDownV2Filters}
+import org.apache.spark.sql.connector.write.{LogicalWriteInfo, WriteBuilder}
+import org.apache.spark.sql.types.StructType
+import org.apache.spark.sql.util.CaseInsensitiveStringMap
+import org.apache.spark.util.ArrayImplicits._
+
+/**
+ * Trait for test scans that expose pushed partition predicates (e.g. to verify
+ * PartitionPredicate.referencedPartitionColumnOrdinals). Used by DataSourceV2 suites.
+ */
+trait TestPartitionPredicateScan {
+  def getPushedPartitionPredicates: Seq[PartitionPredicate]
+}
+
+/**
+ * In-memory table whose scan builder implements enhanced partition filtering using
+ * PartitionPredicates pushed in a second pass.
+ */
+class InMemoryEnhancedPartitionFilterTable(
+    name: String,
+    columns: Array[Column],
+    partitioning: Array[Transform],
+    properties: util.Map[String, String])
+  extends InMemoryTable(name, columns, partitioning, properties) {
+
+  override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
+    new InMemoryEnhancedPartitionFilterScanBuilder(schema())
+  }
+
+  override def newWriteBuilder(info: LogicalWriteInfo): WriteBuilder = {
+    InMemoryBaseTable.maybeSimulateFailedTableWrite(new CaseInsensitiveStringMap(properties))
+    InMemoryBaseTable.maybeSimulateFailedTableWrite(info.options)
+    new InMemoryWriterBuilderWithOverWrite(info)
+  }
+
+  class InMemoryEnhancedPartitionFilterScanBuilder(
+      tableSchema: StructType)
+    extends ScanBuilder
+    with SupportsPushDownV2Filters
+    with SupportsPushDownRequiredColumns {
+
+    private var readSchema: StructType = tableSchema
+    private val partitionPredicates: Buffer[PartitionPredicate] = ArrayBuffer.empty
+    private val firstPassPushedPredicates: Buffer[Predicate] = ArrayBuffer.empty
+
+    override def supportsEnhancedPartitionFiltering(): Boolean = true
+
+    override def pushPredicates(predicates: Array[Predicate]): Array[Predicate] = {
+      val partNames = InMemoryEnhancedPartitionFilterTable.this.partCols.flatMap(_.toSeq).toSet
+      def referencesOnlyPartitionCols(p: Predicate): Boolean =
+        p.references().forall(ref => partNames.contains(ref.fieldNames().mkString(".")))
+
+      val returned = ArrayBuffer.empty[Predicate]
+
+      predicates.foreach {
+        case p: PartitionPredicate =>
+          partitionPredicates += p
+        case p if referencesOnlyPartitionCols(p) &&
+            InMemoryTableWithV2Filter.supportsPredicates(Array(p)) =>
+          firstPassPushedPredicates += p
+        case p =>
+          returned += p
+      }
+
+      if (partitionPredicates.nonEmpty) Array.empty[Predicate]
+      else returned.toArray
+    }
+
+    override def pushedPredicates(): Array[Predicate] =
+      (firstPassPushedPredicates ++ partitionPredicates.map(p => p: Predicate)).toArray
+
+    override def pruneColumns(requiredSchema: StructType): Unit = {
+      readSchema = requiredSchema
+    }
+
+    override def build(): Scan = {
+      val allPartitions = data.map(_.asInstanceOf[InputPartition]).toImmutableArraySeq
+      val partNames =
+        InMemoryEnhancedPartitionFilterTable.this.partCols.map(_.toSeq.quoted)
+          .toImmutableArraySeq
+      val allKeys = allPartitions.map(_.asInstanceOf[BufferedRows].key)
+      val matchingKeys = InMemoryTableWithV2Filter.filtersToKeys(
+        allKeys, partNames, firstPassPushedPredicates.toArray).toSet
+      val filteredByFirstPass = allPartitions.filter(p =>
+        matchingKeys.contains(p.asInstanceOf[BufferedRows].key))
+      val filteredBySeoncPass = filteredByFirstPass.filter { p =>

Review Comment:
Nit: typo — `filteredBySeoncPass` should be `filteredBySecondPass`.
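For context, a hedged sketch of how a connector's second pass might consume the pushed predicates (the `secondPassFilter` helper and `partitionValuesOf` accessor are hypothetical, not part of this PR; `PartitionPredicate.accept` follows the API shown in the first hunk):

```scala
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.connector.expressions.filter.PartitionPredicate

// Illustrative only: a partition survives the second pass only if every pushed
// PartitionPredicate accepts its partition-values row.
def secondPassFilter[P](
    partitions: Seq[P],
    partitionValuesOf: P => InternalRow, // assumed accessor for a partition's values
    predicates: Seq[PartitionPredicate]): Seq[P] = {
  partitions.filter(p => predicates.forall(_.accept(partitionValuesOf(p))))
}
```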
