Github user mallman commented on a diff in the pull request:

    https://github.com/apache/spark/pull/16578#discussion_r148725152
  
    --- Diff: 
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetSchemaPruning.scala
 ---
    @@ -0,0 +1,147 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *    http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.spark.sql.execution.datasources.parquet
    +
    +import org.apache.spark.sql.catalyst.expressions.{And, Attribute, 
Expression, NamedExpression}
    +import org.apache.spark.sql.catalyst.planning.{PhysicalOperation, 
ProjectionOverSchema, SelectedField}
    +import org.apache.spark.sql.catalyst.plans.logical.{Filter, LogicalPlan, 
Project}
    +import org.apache.spark.sql.catalyst.rules.Rule
    +import org.apache.spark.sql.execution.datasources.{HadoopFsRelation, 
LogicalRelation}
    +import org.apache.spark.sql.types.{ArrayType, DataType, MapType, 
StructField, StructType}
    +
    +/**
    + * Prunes unnecessary Parquet columns given a [[PhysicalOperation]] over a
    + * [[ParquetRelation]]. By "Parquet column", we mean a column as defined 
in the
    + * Parquet format. In Spark SQL, a root-level Parquet column corresponds 
to a
    + * SQL column, and a nested Parquet column corresponds to a 
[[StructField]].
    + */
    +private[sql] object ParquetSchemaPruning extends Rule[LogicalPlan] {
    +  override def apply(plan: LogicalPlan): LogicalPlan =
    +    plan transformDown {
    +      case op @ PhysicalOperation(projects, filters,
    +          l @ LogicalRelation(hadoopFsRelation @ HadoopFsRelation(_, 
partitionSchema,
    +            dataSchema, _, parquetFormat: ParquetFileFormat, _), _, _, _)) 
=>
    +        val projectionFields = projects.flatMap(getFields)
    +        val filterFields = filters.flatMap(getFields)
    +        val requestedFields = (projectionFields ++ filterFields).distinct
    +
    +        // If [[requestedFields]] includes a nested field, continue. 
Otherwise,
    +        // return [[op]]
    +        if (requestedFields.exists { case (_, optAtt) => optAtt.isEmpty }) 
{
    +          val prunedSchema = requestedFields
    +            .map { case (field, _) => StructType(Array(field)) }
    +            .reduceLeft(_ merge _)
    +          val dataSchemaFieldNames = dataSchema.fieldNames.toSet
    +          val prunedDataSchema =
    +            StructType(prunedSchema.filter(f => 
dataSchemaFieldNames.contains(f.name)))
    +
    +          // If the data schema is different from the pruned data schema, 
continue. Otherwise,
    +          // return [[op]]. We effect this comparison by counting the 
number of "leaf" fields in
    +          // each schema, assuming the fields in [[prunedDataSchema]] 
are a subset of the fields
    +          // in [[dataSchema]].
    +          if (countLeaves(dataSchema) > countLeaves(prunedDataSchema)) {
    +            val prunedParquetRelation =
    +              hadoopFsRelation.copy(dataSchema = 
prunedDataSchema)(hadoopFsRelation.sparkSession)
    +
    +            // We need to replace the expression ids of the pruned 
relation output attributes
    +            // with the expression ids of the original relation output 
attributes so that
    +            // references to the original relation's output are not broken
    +            val outputIdMap = l.output.map(att => (att.name, 
att.exprId)).toMap
    +            val prunedRelationOutput =
    --- End diff --
    
    Top-level attributes of struct type whose type has been pruned need to be 
replaced in the logical relation's output. The pruned attributes are 
constructed in the `toAttributes` method call on the pruned schema. The 
expression ids of these replacement attributes are altered to the expression 
ids of the original attributes.


---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org

Reply via email to