GitHub user holdenk commented on a diff in the pull request:
https://github.com/apache/spark/pull/12243#discussion_r58957584
--- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileScanRDD.scala ---
@@ -46,37 +50,87 @@ case class PartitionedFile(
  */
 case class FilePartition(index: Int, files: Seq[PartitionedFile]) extends Partition

+object FileScanRDD {
+  private val ioExecutionContext = ExecutionContext.fromExecutorService(
+    ThreadUtils.newDaemonCachedThreadPool("FileScanRDD", 16))
+}
+
 class FileScanRDD(
     @transient val sqlContext: SQLContext,
     readFunction: (PartitionedFile) => Iterator[InternalRow],
     @transient val filePartitions: Seq[FilePartition])
   extends RDD[InternalRow](sqlContext.sparkContext, Nil) {

+  /**
+   * To get better interleaving of CPU and IO, this RDD will create a future to prepare the next
+   * file while the current one is being processed. `currentIterator` is the current file and
+   * `nextFile` is the future that will initialize the next file to be read. This includes things
+   * such as starting up connections to open the file and any initial buffering. The expectation
+   * is that `currentIterator` is CPU intensive and `nextFile` is IO intensive.
+   */
+  val asyncIO = sqlContext.conf.filesAsyncIO
+
+  case class NextFile(file: PartitionedFile, iter: Iterator[Object])
+
   override def compute(split: Partition, context: TaskContext): Iterator[InternalRow] = {
     val iterator = new Iterator[Object] with AutoCloseable {
       private[this] val files = split.asInstanceOf[FilePartition].files.toIterator

+      // TODO: do we need to close this?
       private[this] var currentIterator: Iterator[Object] = null
+      private[this] var nextFile: Future[NextFile] = if (asyncIO) prepareNextFile() else null

       def hasNext = (currentIterator != null && currentIterator.hasNext) || nextIterator()
       def next() = currentIterator.next()

       /** Advances to the next file. Returns true if a new non-empty iterator is available. */
       private def nextIterator(): Boolean = {
-        if (files.hasNext) {
-          val nextFile = files.next()
-          logInfo(s"Reading File $nextFile")
-          SqlNewHadoopRDDState.setInputFileName(nextFile.filePath)
-          currentIterator = readFunction(nextFile)
-          hasNext
+        if (asyncIO) {
+          if (nextFile == null) return false
+        } else {
+          if (!files.hasNext) return false
+        }
+
+        // Wait for the async task to complete
+        val file = if (asyncIO) {
+          Await.result(nextFile, Duration.Inf)
+        } else {
+          val f = files.next()
+          val it = readFunction(f)
+          NextFile(f, it)
+        }
+
+        // This is only used to evaluate the rest of the execution so we can safely set it here.
+        SqlNewHadoopRDDState.setInputFileName(file.file.filePath)
+        currentIterator = file.iter
+
+        if (asyncIO && files.hasNext) {
+          // Asynchronously start the next file.
+          nextFile = prepareNextFile()
         } else {
-          SqlNewHadoopRDDState.unsetInputFileName()
-          false
+          nextFile = null
--- End diff ---
So we are going to keep setting `nextFile` to `null` on every `nextIterator` call even when `asyncIO` is false. Could we change this to:

```scala
if (asyncIO) {
  if (files.hasNext) {
    nextFile = prepareNextFile()
  } else {
    nextFile = null
  }
}
```
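For illustration, here is a minimal, runnable sketch of the same prefetch pattern with that guard in place. Everything Spark-specific is stubbed out: `openFile`, the `String` rows, and the small fixed pool are hypothetical stand-ins for `readFunction`, `InternalRow`, and `ioExecutionContext`, and the field initializer additionally guards the empty-partition case for the demo.

```scala
import java.util.concurrent.Executors
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration.Duration

object PrefetchSketch {
  // Stand-in for ioExecutionContext (the real one is a daemon cached pool of 16).
  private val ioEc = ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(2))

  // Stand-ins for PartitionedFile / readFunction: "opening" a file yields its rows.
  case class NextFile(name: String, iter: Iterator[String])
  private def openFile(name: String): Iterator[String] =
    Iterator(s"$name-row1", s"$name-row2")

  def rows(fileNames: Seq[String], asyncIO: Boolean): Iterator[String] = new Iterator[String] {
    private val files = fileNames.iterator
    // Guard the empty partition up front so prepareNextFile() never sees an exhausted iterator.
    private var nextFile: Future[NextFile] =
      if (asyncIO && files.hasNext) prepareNextFile() else null
    private var currentIterator: Iterator[String] = null

    def hasNext: Boolean =
      (currentIterator != null && currentIterator.hasNext) || nextIterator()
    def next(): String = currentIterator.next()

    private def nextIterator(): Boolean = {
      if (asyncIO) {
        if (nextFile == null) return false
      } else {
        if (!files.hasNext) return false
      }
      // Block for the prefetched file, or open it synchronously in the non-async path.
      val file =
        if (asyncIO) Await.result(nextFile, Duration.Inf)
        else { val f = files.next(); NextFile(f, openFile(f)) }
      currentIterator = file.iter

      // The suggested guard: nextFile is only ever touched when asyncIO is on.
      if (asyncIO) {
        nextFile = if (files.hasNext) prepareNextFile() else null
      }
      // Recurse through empty files so hasNext only answers true for a real row.
      hasNext
    }

    private def prepareNextFile(): Future[NextFile] = {
      // Advance `files` on the calling thread; only the IO-heavy open runs on the pool.
      val f = files.next()
      Future(NextFile(f, openFile(f)))(ioEc)
    }
  }

  def main(args: Array[String]): Unit = {
    println(rows(Seq("a", "b"), asyncIO = true).mkString(", "))
    ioEc.shutdown()
  }
}
```

Note that `prepareNextFile` pulls from `files` on the caller's thread before handing off to the pool, so the (non-thread-safe) file iterator is never touched concurrently.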