parthchandra commented on code in PR #3781:
URL: https://github.com/apache/datafusion-comet/pull/3781#discussion_r3023631570
##########
spark/src/test/scala/org/apache/comet/parquet/ParquetReadSuite.scala:
##########
@@ -981,9 +981,12 @@ abstract class ParquetReadSuite extends CometTestBase {
Seq(StructField("_1", LongType, false), StructField("_2",
DoubleType, false)))
withParquetDataFrame(data, schema = Some(readSchema)) { df =>
- // TODO: validate with Spark 3.x and
'usingDataFusionParquetExec=true'
- if (enableSchemaEvolution || CometConf.COMET_NATIVE_SCAN_IMPL
- .get(conf) == CometConf.SCAN_NATIVE_DATAFUSION) {
+ val scan = CometConf.COMET_NATIVE_SCAN_IMPL.get(conf)
+ val isNativeDataFusionScan =
+ scan == CometConf.SCAN_NATIVE_DATAFUSION || scan ==
CometConf.SCAN_AUTO
Review Comment:
nit: this is not strictly correct, since we could still fall back to
`native_iceberg_compat` when the mode is `AUTO`.
##########
spark/src/main/scala/org/apache/comet/rules/CometScanRule.scala:
##########
@@ -168,8 +168,13 @@ case class CometScanRule(session: SparkSession)
COMET_NATIVE_SCAN_IMPL.get() match {
case SCAN_AUTO =>
- // TODO add support for native_datafusion in the future
- nativeIcebergCompatScan(session, scanExec, r, hadoopConf)
+ nativeDataFusionScan(plan, session, scanExec, r, hadoopConf)
+ .orElse {
+ // clear explain info tags from the failed nativeDataFusionScan
+ // attempt so they don't leak into the fallback path
+ scanExec.unsetTagValue(CometExplainInfo.EXTENSION_INFO)
Review Comment:
I feel we should keep the reason, so that we know why we used
`native_iceberg_compat` instead of `native_datafusion`.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]