kazuyukitanimura commented on code in PR #2138:
URL: https://github.com/apache/datafusion-comet/pull/2138#discussion_r2274871706


##########
spark/src/test/scala/org/apache/comet/parquet/ParquetReadFromS3Suite.scala:
##########
@@ -107,15 +111,52 @@ class ParquetReadFromS3Suite extends CometTestBase with AdaptiveSparkPlanHelper
     val testFilePath = s"s3a://$testBucketName/data/test-file.parquet"
     writeTestParquetFile(testFilePath)
 
-    val df = spark.read.format("parquet").load(testFilePath).agg(sum(col("id")))
-    val scans = collect(df.queryExecution.executedPlan) {
-      case p: CometScanExec =>
-        p
-      case p: CometNativeScanExec =>
-        p
-    }
-    assert(scans.size == 1)
+    Seq(
+      CometConf.SCAN_NATIVE_COMET,
+      CometConf.SCAN_NATIVE_DATAFUSION,
+      CometConf.SCAN_NATIVE_ICEBERG_COMPAT).foreach(scanMode => {
+      withSQLConf(CometConf.COMET_NATIVE_SCAN_IMPL.key -> scanMode) {
+        val df = spark.read.format("parquet").load(testFilePath).agg(sum(col("id")))
+        val scans = collect(df.queryExecution.executedPlan) {
+          case p: CometScanExec =>
+            p
+          case p: CometNativeScanExec =>
+            p
+        }
+        assert(scans.size == 1)
+        assert(df.first().getLong(0) == 499500)
+      }
+    })
+  }
 
-    assert(df.first().getLong(0) == 499500)
+  private def writePartitionedTestParquetFile(filePath: String): Unit = {
+    val df = spark.range(0, 1000).withColumn("val", expr("concat('val#', id % 10)"))
+    df.write.format("parquet").partitionBy("val").mode(SaveMode.Overwrite).save(filePath)
+  }
+
+  test("write partitioned data and read from MinIO") {
+    val testFilePath = s"s3a://$testBucketName/data/test-partitioned"
+    writePartitionedTestParquetFile(testFilePath)
+
+    Seq(
+      CometConf.SCAN_NATIVE_COMET,
+      CometConf.SCAN_NATIVE_DATAFUSION,
+      CometConf.SCAN_NATIVE_ICEBERG_COMPAT).foreach(scanMode => {
+      withSQLConf(CometConf.COMET_NATIVE_SCAN_IMPL.key -> scanMode) {
+        val df =
+          spark.read.format("parquet").load(testFilePath).agg(sum(col("id")), max(col("val")))

Review Comment:
   Should we include a test with an encoded URL? E.g. `Brand%2321`. This is based on your description:
   ```
   if the S3 path has escape sequences, it will corrupt the path and we'll end up getting an error, or silently reading the wrong data.
   ```
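   A rough sketch of what such a test could look like, reusing the suite's existing helpers (`writeTestParquetFile`, `testBucketName`) and the same scan-mode loop as the other tests; the `Brand%2321` directory name is only illustrative, not something the PR defines:
   ```scala
   test("read from MinIO path containing percent-encoded characters") {
     // Hypothetical path: the directory name literally contains "%23"
     // (the percent-encoding of '#'). If the scan decodes the path a
     // second time, it becomes ".../Brand#21/..." and the read either
     // fails or silently returns the wrong data.
     val testFilePath = s"s3a://$testBucketName/data/Brand%2321/test-file.parquet"
     writeTestParquetFile(testFilePath)

     Seq(
       CometConf.SCAN_NATIVE_COMET,
       CometConf.SCAN_NATIVE_DATAFUSION,
       CometConf.SCAN_NATIVE_ICEBERG_COMPAT).foreach(scanMode => {
       withSQLConf(CometConf.COMET_NATIVE_SCAN_IMPL.key -> scanMode) {
         val df = spark.read.format("parquet").load(testFilePath).agg(sum(col("id")))
         assert(df.first().getLong(0) == 499500)
       }
     })
   }
   ```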



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
