Github user tdas commented on a diff in the pull request:
https://github.com/apache/spark/pull/3389#discussion_r20685400
--- Diff: streaming/src/main/scala/org/apache/spark/streaming/dstream/FileInputDStream.scala ---
@@ -155,83 +186,81 @@ class FileInputDStream[K: ClassTag, V: ClassTag, F <: NewInputFormat[K,V] : Clas
private def readObject(ois: ObjectInputStream): Unit = Utils.tryOrIOException {
logDebug(this.getClass().getSimpleName + ".readObject used")
ois.defaultReadObject()
+ allFoundFiles = new mutable.HashSet[String]()
generatedRDDs = new HashMap[Time, RDD[(K,V)]] ()
- files = new HashMap[Time, Array[String]]
- fileModTimes = new TimeStampedHashMap[String, Long](true)
+ timeToSelectedFileInfo = new HashMap[Time, SelectedFileInfo]
+ fileToModTimes = new TimeStampedHashMap[String, Long](updateTimeStampOnGet = true)
}
/**
- * A custom version of the DStreamCheckpointData that stores names of
- * Hadoop files as checkpoint data.
+ * A custom version of the DStreamCheckpointData that stores the information about the
+ * files selected in every batch. This is necessary so that the files selected for the past
+ * batches (that have already been defined) can be recovered correctly upon driver failure and
+ * the input data of the batches are exactly the same.
*/
private[streaming]
class FileInputDStreamCheckpointData extends DStreamCheckpointData(this) {
- def hadoopFiles = data.asInstanceOf[HashMap[Time, Array[String]]]
+ def checkpointedFileInfo = data.asInstanceOf[HashMap[Time, SelectedFileInfo]]
override def update(time: Time) {
- hadoopFiles.clear()
- hadoopFiles ++= files
+ checkpointedFileInfo.clear()
+ checkpointedFileInfo ++= timeToSelectedFileInfo
}
override def cleanup(time: Time) { }
override def restore() {
- hadoopFiles.toSeq.sortBy(_._1)(Time.ordering).foreach {
+ checkpointedFileInfo.toSeq.sortBy(_._1)(Time.ordering).foreach {
case (t, f) => {
// Restore the metadata in both files and generatedRDDs
- logInfo("Restoring files for time " + t + " - " +
- f.mkString("[", ", ", "]") )
- files += ((t, f))
- generatedRDDs += ((t, filesToRDD(f)))
+ logInfo(s"Restoring files for time $t - ${f.files.mkString(",
")}")
+ timeToSelectedFileInfo += ((t, f))
+ allFoundFiles ++= f.files
+ generatedRDDs += ((t, filesToRDD(f.files)))
}
}
}
override def toString() = {
- "[\n" + hadoopFiles.size + " file sets\n" +
- hadoopFiles.map(p => (p._1, p._2.mkString(", "))).mkString("\n") + "\n]"
+ "[\n" + checkpointedFileInfo.size + " file sets\n" +
+ checkpointedFileInfo.map(p => (p._1, p._2.files.mkString(", "))).mkString("\n") + "\n]"
}
}
/**
- * Custom PathFilter class to find new files that
- * ... have modification time more than ignore time
- * ... have not been seen in the last interval
- * ... have modification time less than maxModTime
+ * Custom PathFilter class to find new files that have modification time within the
+ * remember window (that is mod time > ignore threshold) and have not been selected in that
+ * window.
*/
- private[streaming]
- class CustomPathFilter(maxModTime: Long) extends PathFilter {
+ private class CustomPathFilter(modTimeIgnoreThreshold: Long) extends PathFilter {
--- End diff --
It might be better to keep the name generic like "NewFilePathFilter" (docs take care of the rest), and pass the `allFoundFiles` and `fileModTimes` as constructor arguments.
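Roughly something like this (just a sketch; the class name, the `getModTime` parameter, and the exact field types are illustrative, not what the PR currently has):

```scala
// Illustrative sketch only -- not the actual PR code. NewFilePathFilter and the
// getModTime parameter are hypothetical; the real state lives on FileInputDStream.
import scala.collection.mutable
import org.apache.hadoop.fs.{Path, PathFilter}

private class NewFilePathFilter(
    allFoundFiles: mutable.HashSet[String],
    fileToModTimes: mutable.Map[String, Long],
    modTimeIgnoreThreshold: Long,
    getModTime: Path => Long) extends PathFilter {

  def accept(path: Path): Boolean = {
    val pathStr = path.toString
    // A file is "new" if it has not been selected in an earlier batch and its
    // mod time is within the remember window (newer than the ignore threshold).
    !allFoundFiles.contains(pathStr) &&
      fileToModTimes.getOrElseUpdate(pathStr, getModTime(path)) > modTimeIgnoreThreshold
  }
}
```

That way the filter does not reach back into the enclosing DStream's state, which should also make it easier to test in isolation.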