parthchandra commented on code in PR #1103:
URL: https://github.com/apache/datafusion-comet/pull/1103#discussion_r1849161016
##########
spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala:
##########
@@ -2507,23 +2508,22 @@ object QueryPlanSerde extends Logging with ShimQueryPlanSerde with CometExprShim
         partitions.foreach(p => {
           val inputPartitions = p.asInstanceOf[DataSourceRDDPartition].inputPartitions
           inputPartitions.foreach(partition => {
-            partition2Proto(partition.asInstanceOf[FilePartition], nativeScanBuilder)
+            partition2Proto(partition.asInstanceOf[FilePartition], nativeScanBuilder, scan)
           })
         })
       case rdd: FileScanRDD =>
         rdd.filePartitions.foreach(partition => {
-          partition2Proto(partition, nativeScanBuilder)
+          partition2Proto(partition, nativeScanBuilder, scan)
         })
       case _ =>
+        assert(false)
     }

-    val requiredSchemaParquet =
-      new SparkToParquetSchemaConverter(conf).convert(scan.requiredSchema)
-    val dataSchemaParquet =
-      new SparkToParquetSchemaConverter(conf).convert(scan.relation.dataSchema)
+    val projection_vector: Array[java.lang.Long] = scan.requiredSchema.fields.map(field => {

Review Comment:
This change essentially means that any schema 'adaptation' made in `SparkToParquetSchemaConverter.convert` to support legacy timestamps and decimals will no longer be applied, so we will probably fail tests with incorrect results. Also, Comet's Parquet file reader uses `CometParquetReadSupport.clipParquetSchema` to do a similar conversion, and it includes support for the Parquet [field_id](https://github.com/apache/parquet-format/blob/c70281359087dfaee8bd43bed9748675f4aabe11/src/main/thrift/parquet.thrift#L473), which is desirable for delta sources like Iceberg. Basically, a field_id, if present, identifies a field in a schema more precisely, even in the event of field name changes (a sketch of id-based matching appears at the end of this message).

##########
spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala:
##########
@@ -3191,9 +3191,25 @@ object QueryPlanSerde extends Logging with ShimQueryPlanSerde with CometExprShim
   private def partition2Proto(
       partition: FilePartition,
-      nativeScanBuilder: OperatorOuterClass.NativeScan.Builder): Unit = {
+      nativeScanBuilder: OperatorOuterClass.NativeScan.Builder,
+      scan: CometScanExec): Unit = {
     val partitionBuilder = OperatorOuterClass.SparkFilePartition.newBuilder()
+    val sparkContext = scan.session.sparkContext
+    var schema_saved: Boolean = false;
     partition.files.foreach(file => {
+      if (!schema_saved) {
+        // TODO: This code shouldn't be here, but for POC it's fine.
+        // Extract the schema and stash it.
+        val hadoopConf =
+          scan.relation.sparkSession.sessionState.newHadoopConfWithOptions(scan.relation.options)
+        val broadcastedHadoopConf =
+          sparkContext.broadcast(new SerializableConfiguration(hadoopConf))
+        val sharedConf = broadcastedHadoopConf.value.value
+        val footer = FooterReader.readFooter(sharedConf, file)

Review Comment:
You're right, this can never be in production code. For one, it is expensive: reading Parquet footers during planning adds per-file I/O on the driver (see the sketch below).
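For context on the cost point, here is a minimal sketch of what a footer read involves, using the standard parquet-hadoop API rather than Comet's `FooterReader`; `FooterCost` and `readFileSchema` are hypothetical names used only for illustration:

```scala
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.parquet.hadoop.ParquetFileReader
import org.apache.parquet.hadoop.util.HadoopInputFile
import org.apache.parquet.schema.MessageType

object FooterCost {
  // Reading a footer opens the file and deserializes its metadata, so doing
  // this on the driver at planning time adds I/O that grows with the number
  // of files in the scan.
  def readFileSchema(conf: Configuration, path: String): MessageType = {
    val reader = ParquetFileReader.open(HadoopInputFile.fromPath(new Path(path), conf))
    try reader.getFooter.getFileMetaData.getSchema
    finally reader.close()
  }
}
```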
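And to illustrate the field_id point from the first comment: a minimal sketch, not Comet's actual `clipParquetSchema` logic, of id-based matching with a name fallback. It assumes Spark's `parquet.field.id` metadata key on `StructField`; `FieldIdMatching` and `matchField` are hypothetical names:

```scala
import org.apache.parquet.schema.{MessageType, Type}
import org.apache.spark.sql.types.StructField

object FieldIdMatching {
  // Spark stores Parquet field ids in StructField metadata under this key.
  private val FieldIdKey = "parquet.field.id"

  private def sparkFieldId(f: StructField): Option[Int] =
    if (f.metadata.contains(FieldIdKey)) Some(f.metadata.getLong(FieldIdKey).toInt)
    else None

  /** Resolve a Spark field against a Parquet file schema, preferring ids over names. */
  def matchField(fileSchema: MessageType, field: StructField): Option[Type] = {
    val parquetFields = (0 until fileSchema.getFieldCount).map(i => fileSchema.getType(i))
    sparkFieldId(field) match {
      case Some(id) =>
        // Robust to renames: the id survives even when the column name changes.
        parquetFields.find(t => t.getId != null && t.getId.intValue == id)
      case None =>
        parquetFields.find(_.getName == field.name)
    }
  }
}
```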