c21 commented on a change in pull request #29625:
URL: https://github.com/apache/spark/pull/29625#discussion_r481754353



##########
File path: 
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileScanIterators.scala
##########
@@ -0,0 +1,316 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.execution.datasources
+
+import java.io.{FileNotFoundException, IOException}
+
+import scala.collection.JavaConverters._
+import scala.collection.mutable
+
+import org.apache.parquet.io.ParquetDecodingException
+
+import org.apache.spark.{Partition => RDDPartition, SparkUpgradeException, TaskContext}
+import org.apache.spark.deploy.SparkHadoopUtil
+import org.apache.spark.executor.InputMetrics
+import org.apache.spark.internal.Logging
+import org.apache.spark.rdd.InputFileBlockHolder
+import org.apache.spark.sql.catalyst.InternalRow
+import org.apache.spark.sql.execution.{QueryExecutionException, RowIterator}
+import org.apache.spark.sql.vectorized.ColumnarBatch
+import org.apache.spark.util.NextIterator
+
+/**
+ * Holds common logic for iterators that scan files
+ */
+abstract class BaseFileScanIterator(
+    protected val split: RDDPartition,
+    context: TaskContext,
+    ignoreCorruptFiles: Boolean,
+    ignoreMissingFiles: Boolean,
+    readFunction: PartitionedFile => Iterator[InternalRow])
+  extends Iterator[Object]
+  with AutoCloseable
+  with Logging {
+
+  protected val inputMetrics: InputMetrics = context.taskMetrics().inputMetrics
+  private val existingBytesRead = inputMetrics.bytesRead
+
+  // Find a function that will return the FileSystem bytes read by this thread. Do this before
+  // applying readFunction, because it might read some bytes.
+  private val getBytesReadCallback =
+    SparkHadoopUtil.get.getFSBytesReadOnThreadCallback()
+
+  // We get our input bytes from thread-local Hadoop FileSystem statistics.
+  // If we do a coalesce, however, we are likely to compute multiple partitions in the same
+  // task and in the same thread, in which case we need to avoid overriding values written by
+  // previous partitions (SPARK-13071).
+  protected def incTaskInputMetricsBytesRead(): Unit = {
+    inputMetrics.setBytesRead(existingBytesRead + getBytesReadCallback())
+  }
+
+  private[this] val files = split.asInstanceOf[FilePartition].files.toIterator
+  protected[this] var currentFile: PartitionedFile = null
+  protected[this] var currentIterator: Iterator[Object] = null
+
+  override def hasNext: Boolean = {
+    // Kill the task in case it has been marked as killed. This logic is from
+    // InterruptibleIterator, but we inline it here instead of wrapping the iterator in order
+    // to avoid performance overhead.
+    context.killTaskIfInterrupted()
+    (currentIterator != null && currentIterator.hasNext) || nextIterator()
+  }
+
+  override def next(): Object
+
+  private def readFile(file: PartitionedFile): Iterator[InternalRow] = {
+    try {
+      readFunction(file)
+    } catch {
+      case e: FileNotFoundException =>
+        throw new FileNotFoundException(
+          e.getMessage + "\n" +
+            "It is possible the underlying files have been updated. " +
+            "You can explicitly invalidate the cache in Spark by " +
+            "running 'REFRESH TABLE tableName' command in SQL or " +
+            "by recreating the Dataset/DataFrame involved.")
+    }
+  }
+
+  /** Advances to the next file. Returns true if a new non-empty iterator is available. */
+  protected def nextIterator(): Boolean = {
+    if (files.hasNext) {
+      currentFile = files.next()
+      logInfo(s"Reading File $currentFile")
+      // Sets InputFileBlockHolder for the file block's information
+      InputFileBlockHolder.set(currentFile.filePath, currentFile.start, currentFile.length)
+
+      if (ignoreMissingFiles || ignoreCorruptFiles) {
+        currentIterator = new NextIterator[Object] {
+          private val file = currentFile
+          // The readFunction may read some bytes before consuming the iterator, e.g.,
+          // the vectorized Parquet reader. Here we use a lazy val to delay the creation of
+          // the iterator so that the exception is thrown in `getNext`.
+          private lazy val internalIter = readFile(file)
+
+          override def getNext(): AnyRef = {
+            try {
+              if (internalIter.hasNext) {
+                internalIter.next()
+              } else {
+                finished = true
+                null
+              }
+            } catch {
+              case e: FileNotFoundException if ignoreMissingFiles =>
+                logWarning(s"Skipped missing file: $currentFile", e)
+                finished = true
+                null
+              // Throw FileNotFoundException even if `ignoreCorruptFiles` is true
+              case e: FileNotFoundException if !ignoreMissingFiles => throw e
+              case e @ (_: RuntimeException | _: IOException) if ignoreCorruptFiles =>
+                logWarning(
+                  s"Skipped the rest of the content in the corrupted file: $currentFile", e)
+                finished = true
+                null
+            }
+          }
+
+          override def close(): Unit = {}
+        }
+      } else {
+        currentIterator = readFile(currentFile)
+      }
+
+      try {
+        currentIterator.hasNext

Review comment:
This is changed from `hasNext` to `currentIterator.hasNext` because `FileSortedBucketScanIterator` needs to override `hasNext`. Since `currentIterator` is non-null at this point anyway, the change should be a no-op.
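
For readers skimming the diff, here is a minimal, self-contained sketch of the pattern being discussed (hypothetical names, not the actual Spark classes): the base class probes `currentIterator.hasNext` directly rather than calling the public `hasNext`, so a subclass that overrides `hasNext` is never re-entered from the base class while it advances to the next source.

```scala
// Minimal sketch of the override pattern. BaseScanIterator and
// CountingScanIterator are hypothetical stand-ins for
// BaseFileScanIterator / FileSortedBucketScanIterator.
abstract class BaseScanIterator[T](sources: Iterator[Iterator[T]]) extends Iterator[T] {
  protected var currentIterator: Iterator[T] = Iterator.empty

  override def hasNext: Boolean = currentIterator.hasNext || nextIterator()

  override def next(): T = currentIterator.next()

  // Probes currentIterator.hasNext instead of this.hasNext, so a subclass
  // override of hasNext does not change how the base class advances sources.
  protected def nextIterator(): Boolean = {
    if (sources.hasNext) {
      currentIterator = sources.next()
      currentIterator.hasNext || nextIterator()
    } else {
      false
    }
  }
}

// A subclass that overrides hasNext, analogous to FileSortedBucketScanIterator;
// the base class's nextIterator() is unaffected by the override.
class CountingScanIterator[T](sources: Iterator[Iterator[T]])
    extends BaseScanIterator[T](sources) {
  var probes: Int = 0
  override def hasNext: Boolean = {
    probes += 1 // extra bookkeeping before delegating to the base class
    super.hasNext
  }
}

object ScanIteratorDemo extends App {
  val it = new CountingScanIterator(Iterator(Iterator(1, 2), Iterator.empty[Int], Iterator(3)))
  println(it.toList) // List(1, 2, 3)
}
```

With this structure, `nextIterator()` behaves identically whether or not the subclass overrides `hasNext`, which is why the change in the diff is behavior-preserving for the base class.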



