Github user mridulm commented on a diff in the pull request:

    https://github.com/apache/spark/pull/1609#discussion_r15565486
  
    --- Diff: core/src/main/scala/org/apache/spark/storage/BlockObjectWriter.scala ---
    @@ -107,68 +109,296 @@ private[spark] class DiskBlockObjectWriter(
       private var fos: FileOutputStream = null
       private var ts: TimeTrackingOutputStream = null
       private var objOut: SerializationStream = null
    +
    +  // Did we create this file or was it already present : used in revert to decide
    +  // if we should delete this file or not. Also used to detect if file was deleted
    +  // between creation of BOW and its actual init
    +  private val initiallyExists = file.exists() && file.isFile
       private val initialPosition = file.length()
       private var lastValidPosition = initialPosition
    +
       private var initialized = false
    +  // closed explicitly ?
    +  private var closed = false
    +  // Attempt to cleanly close ? (could also be closed via revert)
    +  // Note, a cleanly closed file could be subsequently reverted
    +  private var cleanCloseAttempted = false
    +  // Was the file actually opened at least once.
    +  // Note: initialized/streams change state with close/revert.
    +  private var wasOpenedOnce = false
       private var _timeWriting = 0L
     
    -  override def open(): BlockObjectWriter = {
    -    fos = new FileOutputStream(file, true)
    -    ts = new TimeTrackingOutputStream(fos)
    -    channel = fos.getChannel()
    +  // Due to some directory creation race issues in Spark, it has been observed that
    +  // sometimes file creation happens 'before' the actual directory has been created.
    +  // So we attempt to retry at least once with a mkdirs in case the directory was missing.
    +  private def init() {
    +    init(canRetry = true)
    +  }
    +
    +  private def init(canRetry: Boolean) {
    +
    +    if (closed) throw new IOException("Already closed")
    +
    +    assert (! initialized)
    +    assert (! wasOpenedOnce)
    +    var exists = false
    +    try {
    +      exists = file.exists()
    +      if (! exists && initiallyExists && 0 != initialPosition && ! Utils.inShutdown) {
    +        // Was deleted by cleanup thread ?
    +        throw new IOException("file " + file + " cleaned up ? exists = " + exists +
    +          ", initiallyExists = " + initiallyExists + ", initialPosition = " + initialPosition)
    +      }
    +      fos = new FileOutputStream(file, true)
    +    } catch {
    +      case fEx: FileNotFoundException =>
    +        // There seems to be some race in directory creation.
    +        // Attempts to fix it don't seem to have worked : working around the problem for now.
    +        logDebug("Unable to open " + file + ", canRetry = " + canRetry + ", exists = " + exists +
    +          ", initialPosition = " + initialPosition + ", in shutdown = " + Utils.inShutdown(), fEx)
    +        if (canRetry && ! Utils.inShutdown()) {
    +          // try creating the parent directory if that is the issue.
    +          // Since there can be a race with others, don't bother checking for
    +          // success/failure - the call to init() will resolve if fos can be created.
    +          file.getParentFile.mkdirs()
    +          // Note, if directory did not exist, then file does not either - and so
    +          // initialPosition would be zero in either case.
    +          init(canRetry = false)
    +          return
    +        } else throw fEx
    +    }
    +
    +    try {
    +      // This is to work around the case where creation of the object and its actual init
    +      // (which can happen much later) happens after a delay and the cleanup thread
    +      // cleaned up the file.
    +      channel = fos.getChannel
    +      val fosPos = channel.position()
    +      if (initialPosition != fosPos) {
    +        throw new IOException("file cleaned up ? " + file.exists() +
    +          ", initialpos = " + initialPosition +
    +          ", current len = " + fosPos + ", in shutdown ? " + Utils.inShutdown)
    +      }
    +
    +      ts = new TimeTrackingOutputStream(fos)
    +      val bos = new BufferedOutputStream(ts, bufferSize)
    +      bs = compressStream(bos)
    +      objOut = serializer.newInstance().serializeStream(bs)
    +      initialized = true
    +      wasOpenedOnce = true;
    +    } finally {
    +      if (! initialized) {
    +        // failed, cleanup state.
    +        val tfos = fos
    +        updateCloseState()
    +        tfos.close()
    +      }
    +    }
    +  }
    +
    +  private def open(): BlockObjectWriter = {
    +    init()
         lastValidPosition = initialPosition
    -    bs = compressStream(new BufferedOutputStream(ts, bufferSize))
    -    objOut = serializer.newInstance().serializeStream(bs)
    -    initialized = true
         this
       }
     
    -  override def close() {
    -    if (initialized) {
    -      if (syncWrites) {
    -        // Force outstanding writes to disk and track how long it takes
    -        objOut.flush()
    +  private def updateCloseState() {
    +
    +    if (ts ne null) _timeWriting += ts.timeWriting
    --- End diff --
    
    The only reason to use eq/ne was to avoid the call to equals/canEqual/etc.
    Since Spark ships with assertions turned on, this ends up being expensive because some of these code paths are very hot: all the changes in the 2G fix branch use eq/ne.
    Not sure how relevant it is to this patch though ... I can revert to ==/!= if it is a problem!
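    
    For context, here is a minimal illustrative Scala sketch (not part of the patch) of the distinction being discussed: eq/ne compare object identity directly, while ==/!= on a non-null reference go through equals, which for a case class also involves canEqual and field-by-field comparison.
    
        // Illustrative sketch only, not from this patch.
        // eq/ne: reference (identity) equality; ==/!=: value equality via equals().
        case class Record(id: Int, payload: String)
    
        object EqVsEquals {
          def main(args: Array[String]): Unit = {
            val a = Record(1, "x")
            val b = Record(1, "x")
    
            println(a eq b)    // false: different instances, identity comparison only
            println(a == b)    // true: structural equality via generated equals/canEqual
            println(a ne null) // true: plain identity check, never calls equals()
            println(a != null) // true as well, but for a non-null receiver this expands to
                               // !a.equals(null), i.e. a virtual equals() dispatch
          }
        }
    
    On a hot path such as per-record writes, eq/ne avoid that dispatch entirely, which is the saving referred to above.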

