erenavsarogullari commented on a change in pull request #34980:
URL: https://github.com/apache/spark/pull/34980#discussion_r776123609
##########
File path: core/src/main/scala/org/apache/spark/storage/BlockManager.scala
##########
@@ -926,30 +933,48 @@ private[spark] class BlockManager(
})
Some(new BlockResult(ci, DataReadMethod.Memory, info.size))
} else if (level.useDisk && diskStore.contains(blockId)) {
- val diskData = diskStore.getBytes(blockId)
- val iterToReturn: Iterator[Any] = {
- if (level.deserialized) {
- val diskValues = serializerManager.dataDeserializeStream(
- blockId,
- diskData.toInputStream())(info.classTag)
- maybeCacheDiskValuesInMemory(info, blockId, level, diskValues)
- } else {
-        val stream = maybeCacheDiskBytesInMemory(info, blockId, level, diskData)
- .map { _.toInputStream(dispose = false) }
- .getOrElse { diskData.toInputStream() }
-        serializerManager.dataDeserializeStream(blockId, stream)(info.classTag)
+ try {
+ val diskData = diskStore.getBytes(blockId)
+ val iterToReturn: Iterator[Any] = {
+ if (level.deserialized) {
+ val diskValues = serializerManager.dataDeserializeStream(
+ blockId,
+ diskData.toInputStream())(info.classTag)
+ maybeCacheDiskValuesInMemory(info, blockId, level, diskValues)
+ } else {
+            val stream = maybeCacheDiskBytesInMemory(info, blockId, level, diskData)
+ .map { _.toInputStream(dispose = false) }
+ .getOrElse { diskData.toInputStream() }
+            serializerManager.dataDeserializeStream(blockId, stream)(info.classTag)
+ }
}
+ val ci = CompletionIterator[Any, Iterator[Any]](iterToReturn, {
+ releaseLockAndDispose(blockId, diskData, taskContext)
+ })
+ Some(new BlockResult(ci, DataReadMethod.Disk, info.size))
+ } catch {
+ case ex: KryoException if ex.getCause.isInstanceOf[IOException] =>
+            // We need to have clear error message to catch environmental problems easily.
+            // Further details: https://issues.apache.org/jira/browse/SPARK-37710
+ processKryoException(ex, blockId)
+ throw ex
}
- val ci = CompletionIterator[Any, Iterator[Any]](iterToReturn, {
- releaseLockAndDispose(blockId, diskData, taskContext)
- })
- Some(new BlockResult(ci, DataReadMethod.Disk, info.size))
} else {
handleLocalReadFailure(blockId)
}
}
}
+  private def processKryoException(ex: KryoException, blockId: BlockId): Unit = {
+    var errorMessage = s"${ex.getMessage}. Please check if environment status is healthy for " +
+      s"disk corruption, network failure (etc). ${blockManagerId.toString} - blockName: $blockId"
+ if (diskBlockManager.containsBlock(blockId)) {
+ val file = diskBlockManager.getFile(blockId)
+      errorMessage = errorMessage + s" - blockDiskPath: ${file.getAbsolutePath}"
+ }
+ logError(errorMessage, ex)
Review comment:
The patch's main goal is to provide a hint by logging the `BlockManager`, `blockId`, and `blockPath` for potential disk problems occurring in the `Kryo deserialization` flow. I think we just need to log the problematic `BlockManager` and `block` info, which is why the `error` log-level is used. (`ex` can be removed from `logError`, since the exception is already logged at `Executor$TaskRunner`.)
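For illustration, a minimal sketch (hypothetical, assuming the rest of the patch stays as in the diff above) of the method with `ex` dropped from `logError`:
```
// Hypothetical sketch inside BlockManager, assuming the diff above: the error-level
// hint is kept, but `ex` is dropped from logError because the exception is re-thrown
// and its stack trace is logged again by Executor$TaskRunner.
private def processKryoException(ex: KryoException, blockId: BlockId): Unit = {
  var errorMessage = s"${ex.getMessage}. Please check if environment status is healthy for " +
    s"disk corruption, network failure (etc). ${blockManagerId.toString} - blockName: $blockId"
  if (diskBlockManager.containsBlock(blockId)) {
    val file = diskBlockManager.getFile(blockId)
    errorMessage = errorMessage + s" - blockDiskPath: ${file.getAbsolutePath}"
  }
  logError(errorMessage) // no `ex` here; the caller re-throws it
}
```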
On the other hand, the exception is re-thrown to avoid breaking the current execution flow (e.g. `BlockManager.getLocalValues` is called by `TorrentBroadcast.readBroadcastBlock`, which also catches `IOException`/`NonFatal`, logs, and re-throws). The current execution flow is as follows:
```
FileInputStream => Compression API calls => Kryo Deserialization Flow =>
BlockManager => RDD => MapPartitionsRDD => ResultTask => Task =>
Executor$TaskRunner...
```
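As an illustration of that caller-side pattern, here is a rough, hypothetical sketch of the log-and-rethrow behaviour described above (not the actual `TorrentBroadcast` code; `useResult` and `fetchRemotelyOrFail` are illustrative placeholders):
```
import scala.util.control.NonFatal

try {
  blockManager.getLocalValues(blockId) match {
    case Some(result) => useResult(result)        // happy path: block found locally
    case None => fetchRemotelyOrFail(blockId)     // fall back to a remote fetch
  }
} catch {
  case NonFatal(e) =>
    // Log a hint for the failure, then re-throw so the exception still reaches
    // Executor$TaskRunner and the task fails exactly as it did before the patch.
    logError(s"Could not read block $blockId", e)
    throw e
}
```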