c21 commented on a change in pull request #31413:
URL: https://github.com/apache/spark/pull/31413#discussion_r567589223



##########
File path: sql/core/src/test/scala/org/apache/spark/sql/sources/DisableUnnecessaryBucketedScanSuite.scala
##########
@@ -95,7 +95,7 @@ abstract class DisableUnnecessaryBucketedScanSuite
         ("SELECT i FROM t1", 0, 1),
         ("SELECT j FROM t1", 0, 0),
         // Filter on bucketed column
-        ("SELECT * FROM t1 WHERE i = 1", 1, 1),
+        ("SELECT * FROM t1 WHERE i = 1", 0, 1),

Review comment:
      This unit test change is expected, as we no longer need to do a bucketed scan for this kind of query. See the related change in `DisableUnnecessaryBucketedScan.scala`.
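
For readers following along, here is a minimal sketch of the behavior this expectation encodes; it is not code from the PR. It assumes the conf keys `spark.sql.sources.bucketing.autoBucketedScan.enabled` (from `SQLConf.AUTO_BUCKETED_SCAN_ENABLED`) and `spark.sql.adaptive.enabled`, plus a hypothetical table `t1` bucketed by `i`:

```scala
// Minimal sketch, not code from this PR. Conf keys should be verified
// against your Spark version; t1 is a hypothetical bucketed table.
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.FileSourceScanExec

val spark = SparkSession.builder().master("local[*]").getOrCreate()
import spark.implicits._

// AQE wraps the plan in AdaptiveSparkPlanExec; disable it so the scan node
// is directly visible in executedPlan for this sketch.
spark.conf.set("spark.sql.adaptive.enabled", "false")
spark.conf.set("spark.sql.sources.bucketing.autoBucketedScan.enabled", "true")

Seq((1, 10), (2, 20)).toDF("i", "j").write.bucketBy(8, "i").saveAsTable("t1")

val plan = spark.sql("SELECT * FROM t1 WHERE i = 1").queryExecution.executedPlan
val bucketedScans = plan.collect {
  case scan: FileSourceScanExec if scan.bucketedScan => scan
}
// Matches the updated tuple ("SELECT * FROM t1 WHERE i = 1", 0, 1): no
// downstream operator needs the bucketed output partitioning, so the rule
// disables the bucketed scan (bucket pruning still limits the files read).
assert(bucketedScans.isEmpty)
```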

##########
File path: sql/core/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala
##########
@@ -148,19 +151,53 @@ abstract class BucketedReadSuite extends QueryTest with SQLTestUtils with Adapti
         if (invalidBuckets.nonEmpty) {
           fail(s"Buckets ${invalidBuckets.mkString(",")} should have been 
pruned from:\n$plan")
         }
+
+        withSQLConf(SQLConf.BUCKETING_ENABLED.key -> "false") {
+          // Bucket pruning should still work when bucketing is disabled
+          val planWithBucketDisabled = spark.table("bucketed_table").select("i", "j", "k").filter(filterCondition)
+            .queryExecution.executedPlan
+          val fileScanWithBucketDisabled = getFileScan(planWithBucketDisabled)
+          assert(!fileScanWithBucketDisabled.bucketedScan,
+            "except no bucketed scan when disabling bucketing but found\n" +
+              s"$fileScanWithBucketDisabled")
+
+          val tableSchema = fileScanWithBucketDisabled.schema
+          val bucketColumnIndex = tableSchema.fieldIndex(bucketColumnNames.head)
+          val bucketColumn = tableSchema.toAttributes(bucketColumnIndex)
+          val bucketColumnType = tableSchema.apply(bucketColumnIndex).dataType
+          val rowsWithInvalidBuckets = fileScanWithBucketDisabled.execute().filter(row => {
+            // Keep only the rows that should have been pruned
+            val bucketColumnValue = row.get(bucketColumnIndex, bucketColumnType)
+            val bucketId = BucketingUtils.getBucketIdFromValue(
+              bucketColumn, numBuckets, bucketColumnValue)
+            !matchedBuckets.get(bucketId)
+          }).collect()
+
+          if (rowsWithInvalidBuckets.nonEmpty) {
+            fail(s"Rows ${rowsWithInvalidBuckets.mkString(",")} should have 
been pruned from:\n" +
+              s"$planWithBucketDisabled")
+          }
+        }
       }
 
+      val expectedDataFrame = originalDataFrame.filter(filterCondition).orderBy("i", "j", "k")
       checkAnswer(
         bucketedDataFrame.filter(filterCondition).orderBy("i", "j", "k"),
-        originalDataFrame.filter(filterCondition).orderBy("i", "j", "k"))
+        expectedDataFrame)
+
+      withSQLConf(SQLConf.BUCKETING_ENABLED.key -> "false") {
+        checkAnswer(
+          spark.table("bucketed_table").select("i", "j", 
"k").filter(filterCondition)
+            .orderBy("i", "j", "k"),
+          expectedDataFrame)
+      }
     }
   }
 
   test("read partitioning bucketed tables with bucket pruning filters") {
     withTable("bucketed_table") {
       val numBuckets = NumBucketsForPruningDF
       val bucketSpec = BucketSpec(numBuckets, Seq("j"), Nil)
-      // json does not support predicate push-down, and thus json is used here

Review comment:
      This is no longer true: JSON filter push-down was added in https://issues.apache.org/jira/browse/SPARK-30648.
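
For reference, a minimal sketch of the push-down behavior; the conf key `spark.sql.json.filterPushdown.enabled` is assumed from `SQLConf.JSON_FILTER_PUSHDOWN_ENABLED` (added by SPARK-30648; verify against your version), and the input path is hypothetical:

```scala
// Minimal sketch, not code from this PR. The conf key comes from
// SPARK-30648; /tmp/people.json is a hypothetical input path.
import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder().master("local[*]").getOrCreate()

// With push-down enabled, the j = 1 predicate can be evaluated while
// parsing JSON records instead of after materializing every row.
spark.conf.set("spark.sql.json.filterPushdown.enabled", "true")
spark.read.schema("i INT, j INT").json("/tmp/people.json")
  .filter("j = 1")
  .explain() // the scan node should reflect the pushed filter

// Setting it to "false" restores the pre-SPARK-30648 behavior that the
// removed test comment was describing.
spark.conf.set("spark.sql.json.filterPushdown.enabled", "false")
```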



