liancheng commented on a change in pull request #29831:
URL: https://github.com/apache/spark/pull/29831#discussion_r492476018



##########
File path: sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PrunePartitionSuiteBase.scala
##########
@@ -46,30 +46,42 @@ abstract class PrunePartitionSuiteBase extends QueryTest with SQLTestUtils with
         }
 
         assertPrunedPartitions(
-          "SELECT * FROM t WHERE p = '1' OR (p = '2' AND i = 1)", 2)
+          "SELECT * FROM t WHERE p = '1' OR (p = '2' AND i = 1)", 2, 1)
         assertPrunedPartitions(
-          "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (i = 1 OR p = '2')", 4)
+           "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (i = 1 OR p = '2')", 4, 0)
         assertPrunedPartitions(
-          "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (p = '3' AND i = 3 )", 2)
+          "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (p = '3' AND i = 3 )", 2, 1)
         assertPrunedPartitions(
-          "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (p = '2' OR p = '3')", 3)
+          "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (p = '2' OR p = '3')", 3, 1)
         assertPrunedPartitions(
-          "SELECT * FROM t", 4)
+          "SELECT * FROM t", 4, expectedPushedDownFilterCount = 0)
         assertPrunedPartitions(
-          "SELECT * FROM t WHERE p = '1' AND i = 2", 1)
+          "SELECT * FROM t WHERE p = '1' AND i = 2", 1, 2)
         assertPrunedPartitions(
           """
             |SELECT i, COUNT(1) FROM (
             |SELECT * FROM t WHERE  p = '1' OR (p = '2' AND i = 1)
             |) tmp GROUP BY i
-          """.stripMargin, 2)
+          """.stripMargin, 2, 1)
       }
     }
   }
 
-  protected def assertPrunedPartitions(query: String, expected: Long): Unit = {
-    val plan = sql(query).queryExecution.sparkPlan
-    assert(getScanExecPartitionSize(plan) == expected)
+  protected def assertPrunedPartitions(
+      query: String,
+      expectedPartitionCount: Long,
+      expectedPushedDownFilterCount: Int): Unit = {
+    val qe = sql(query).queryExecution
+    val plan = qe.sparkPlan
+    assert(getScanExecPartitionSize(plan) == expectedPartitionCount)
+    val pushedDownPartitionFilters = qe.executedPlan.collectFirst {
+      case FileSourceScanExec(_, _, _, partitionFilters, _, _, _, _) =>
+        partitionFilters
+      case HiveTableScanExec(_, _, partitionFilters) =>
+        partitionFilters

Review comment:
    ```suggestion
      case scan: FileSourceScanExec => scan.partitionFilters
      case scan: HiveTableScanExec => scan.partitionFilters
    ```
   
   I'd prefer the above style for the following reasons:
   
   1. It's more resilient to future physical plan changes, e.g., adding, removing, or reordering fields.
   2. Shorter.
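
   To illustrate point 1, here is a minimal standalone sketch (hypothetical `Scan` case class, not Spark's actual plan nodes):

   ```scala
   // Hypothetical plan node standing in for FileSourceScanExec / HiveTableScanExec.
   case class Scan(table: String, partitionFilters: Seq[String], limit: Option[Int])

   val plan: Any = Scan("t", Seq("p = '1'"), None)

   // Constructor pattern: adding, removing, or reordering fields of Scan
   // turns this match into a compile error.
   val viaConstructor = plan match {
     case Scan(_, partitionFilters, _) => partitionFilters
   }

   // Typed pattern: unaffected by changes to the constructor signature.
   val viaType = plan match {
     case scan: Scan => scan.partitionFilters
   }
   ```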
   

##########
File path: sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PrunePartitionSuiteBase.scala
##########
@@ -46,30 +46,42 @@ abstract class PrunePartitionSuiteBase extends QueryTest with SQLTestUtils with
         }
 
         assertPrunedPartitions(
-          "SELECT * FROM t WHERE p = '1' OR (p = '2' AND i = 1)", 2)
+          "SELECT * FROM t WHERE p = '1' OR (p = '2' AND i = 1)", 2, 1)
         assertPrunedPartitions(
-          "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (i = 1 OR p = '2')", 4)
+           "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (i = 1 OR p = '2')", 4, 0)
         assertPrunedPartitions(
-          "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (p = '3' AND i = 3 )", 2)
+          "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (p = '3' AND i = 3 )", 2, 1)
         assertPrunedPartitions(
-          "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (p = '2' OR p = '3')", 3)
+          "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (p = '2' OR p = '3')", 3, 1)
         assertPrunedPartitions(
-          "SELECT * FROM t", 4)
+          "SELECT * FROM t", 4, expectedPushedDownFilterCount = 0)

Review comment:
      Why add the parameter name only for this line?

##########
File path: sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PrunePartitionSuiteBase.scala
##########
@@ -46,30 +46,42 @@ abstract class PrunePartitionSuiteBase extends QueryTest with SQLTestUtils with
         }
 
         assertPrunedPartitions(
-          "SELECT * FROM t WHERE p = '1' OR (p = '2' AND i = 1)", 2)
+          "SELECT * FROM t WHERE p = '1' OR (p = '2' AND i = 1)", 2, 1)
         assertPrunedPartitions(
-          "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (i = 1 OR p = '2')", 4)
+           "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (i = 1 OR p = '2')", 4, 0)

Review comment:
       Nit: Extra space.

##########
File path: sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PrunePartitionSuiteBase.scala
##########
@@ -46,30 +46,42 @@ abstract class PrunePartitionSuiteBase extends QueryTest with SQLTestUtils with
         }
 
         assertPrunedPartitions(
-          "SELECT * FROM t WHERE p = '1' OR (p = '2' AND i = 1)", 2)
+          "SELECT * FROM t WHERE p = '1' OR (p = '2' AND i = 1)", 2, 1)
         assertPrunedPartitions(
-          "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (i = 1 OR p = '2')", 4)
+           "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (i = 1 OR p = '2')", 4, 0)
         assertPrunedPartitions(
-          "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (p = '3' AND i = 3 )", 2)
+          "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (p = '3' AND i = 3 )", 2, 1)
         assertPrunedPartitions(
-          "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (p = '2' OR p = '3')", 3)
+          "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (p = '2' OR p = '3')", 3, 1)
         assertPrunedPartitions(
-          "SELECT * FROM t", 4)
+          "SELECT * FROM t", 4, expectedPushedDownFilterCount = 0)
         assertPrunedPartitions(
-          "SELECT * FROM t WHERE p = '1' AND i = 2", 1)
+          "SELECT * FROM t WHERE p = '1' AND i = 2", 1, 2)
         assertPrunedPartitions(
           """
             |SELECT i, COUNT(1) FROM (
             |SELECT * FROM t WHERE  p = '1' OR (p = '2' AND i = 1)
             |) tmp GROUP BY i
-          """.stripMargin, 2)
+          """.stripMargin, 2, 1)
       }
     }
   }
 
-  protected def assertPrunedPartitions(query: String, expected: Long): Unit = {
-    val plan = sql(query).queryExecution.sparkPlan
-    assert(getScanExecPartitionSize(plan) == expected)
+  protected def assertPrunedPartitions(
+      query: String,
+      expectedPartitionCount: Long,
+      expectedPushedDownFilterCount: Int): Unit = {
+    val qe = sql(query).queryExecution
+    val plan = qe.sparkPlan
+    assert(getScanExecPartitionSize(plan) == expectedPartitionCount)
+    val pushedDownPartitionFilters = qe.executedPlan.collectFirst {
+      case FileSourceScanExec(_, _, _, partitionFilters, _, _, _, _) =>
+        partitionFilters
+      case HiveTableScanExec(_, _, partitionFilters) =>
+        partitionFilters
+    }
+    assert(pushedDownPartitionFilters.isDefined &&
+      pushedDownPartitionFilters.get.length == expectedPushedDownFilterCount)

Review comment:
       How about:
   
    ```suggestion
        assert(pushedDownPartitionFilters.map(_.length) == Some(expectedPushedDownFilterCount))
    ```
   
    For the current assertion, if it fails, it's hard to tell which condition fails.
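
    For example, with hypothetical values (assuming the ScalaTest `assert` macro used by Spark's test suites):

    ```scala
    // Hypothetical stand-in for the collected partition filters.
    val pushed: Option[Seq[String]] = Some(Seq("p = '1'", "p = '2'"))

    // Equality form: a single comparison, so a failure report shows the
    // actual and expected values side by side (e.g. Some(2) vs. Some(1)).
    assert(pushed.map(_.length) == Some(2))

    // Compound form: two conditions folded into one Boolean, so a failure
    // is harder to map back to the condition that was violated.
    assert(pushed.isDefined && pushed.get.length == 2)
    ```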

##########
File path: sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveStrategies.scala
##########
@@ -254,22 +254,29 @@ private[hive] trait HiveStrategies {
   * Retrieves data using a HiveTableScan.  Partition pruning predicates are also detected and
    * applied.
    */
-  object HiveTableScans extends Strategy {
+  object HiveTableScans extends Strategy with PredicateHelper {
     def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
-      case ScanOperation(projectList, predicates, relation: HiveTableRelation) =>
+      case ScanOperation(projectList, filters, relation: HiveTableRelation) =>
        // Filter out all predicates that only deal with partition keys, these are given to the
         // hive table scan operator to be used for partition pruning.
         val partitionKeyIds = AttributeSet(relation.partitionCols)
-        val (pruningPredicates, otherPredicates) = predicates.partition { predicate =>
-          !predicate.references.isEmpty &&
-          predicate.references.subsetOf(partitionKeyIds)
+        val normalized = DataSourceStrategy.normalizeExprs(
+          filters.filter(_.deterministic), relation.output)
+        val partitionKeyFilters = if (relation.partitionCols.isEmpty) {
+          ExpressionSet(Nil)
+        } else {
+          val predicates = ExpressionSet(normalized
+            .flatMap(extractPredicatesWithinOutputSet(_, partitionKeyIds)))
+            .filter(_.references.subsetOf(partitionKeyIds))
+          logInfo(s"Pruning directories with: ${filters.mkString(",")}")

Review comment:
    ```suggestion
          logInfo(s"Pruning partitions with: ${filters.mkString(",")}")
    ```
   
    (Conceptually, partitioning can be implemented using mechanisms other than filesystem directories.)
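
    For reference, the convertible-predicate extraction this hunk relies on (`extractPredicatesWithinOutputSet`) can be sketched on a toy predicate type; this is a self-contained illustration of the idea, not Spark's Catalyst implementation:

    ```scala
    sealed trait Pred
    case class Leaf(column: String, cond: String) extends Pred
    case class And(left: Pred, right: Pred) extends Pred
    case class Or(left: Pred, right: Pred) extends Pred

    // Weakest predicate implied by `p` that references only `cols`: under AND
    // either side alone is safe to keep; under OR both sides must be
    // convertible, otherwise nothing can be pushed down.
    def extract(p: Pred, cols: Set[String]): Option[Pred] = p match {
      case Leaf(c, _) => if (cols.contains(c)) Some(p) else None
      case And(l, r) => (extract(l, cols), extract(r, cols)) match {
        case (Some(a), Some(b)) => Some(And(a, b))
        case (some @ Some(_), None) => some
        case (None, some) => some
      }
      case Or(l, r) =>
        for { a <- extract(l, cols); b <- extract(r, cols) } yield Or(a, b)
    }

    // p = '1' OR (p = '2' AND i = 1)  ==>  p = '1' OR p = '2',
    // matching the single pushed-down filter the test suite expects for that query.
    val pred = Or(Leaf("p", "= '1'"), And(Leaf("p", "= '2'"), Leaf("i", "= 1")))
    assert(extract(pred, Set("p")) == Some(Or(Leaf("p", "= '1'"), Leaf("p", "= '2'"))))
    ```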





----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
