attilapiros commented on code in PR #36512:
URL: https://github.com/apache/spark/pull/36512#discussion_r900484149
##########
core/src/main/scala/org/apache/spark/storage/BlockManager.scala:
##########
@@ -933,46 +935,56 @@ private[spark] class BlockManager(
         })
         Some(new BlockResult(ci, DataReadMethod.Memory, info.size))
       } else if (level.useDisk && diskStore.contains(blockId)) {
+        var diskData: BlockData = null
         try {
-          val diskData = diskStore.getBytes(blockId)
-          val iterToReturn: Iterator[Any] = {
-            if (level.deserialized) {
-              val diskValues = serializerManager.dataDeserializeStream(
-                blockId,
-                diskData.toInputStream())(info.classTag)
-              maybeCacheDiskValuesInMemory(info, blockId, level, diskValues)
-            } else {
-              val stream = maybeCacheDiskBytesInMemory(info, blockId, level, diskData)
-                .map { _.toInputStream(dispose = false) }
-                .getOrElse { diskData.toInputStream() }
-              serializerManager.dataDeserializeStream(blockId, stream)(info.classTag)
-            }
+          diskData = diskStore.getBytes(blockId)
+          val iterToReturn = if (level.deserialized) {
+            val diskValues = serializerManager.dataDeserializeStream(
+              blockId,
+              diskData.toInputStream())(info.classTag)
+            maybeCacheDiskValuesInMemory(info, blockId, level, diskValues)
+          } else {
+            val stream = maybeCacheDiskBytesInMemory(info, blockId, level, diskData)
+              .map { _.toInputStream(dispose = false) }
+              .getOrElse { diskData.toInputStream() }
+            serializerManager.dataDeserializeStream(blockId, stream)(info.classTag)
           }
           val ci = CompletionIterator[Any, Iterator[Any]](iterToReturn, {
             releaseLockAndDispose(blockId, diskData, taskContext)
           })
           Some(new BlockResult(ci, DataReadMethod.Disk, info.size))
         } catch {
-          case ex: KryoException if ex.getCause.isInstanceOf[IOException] =>
-            // We need to have detailed log message to catch environmental problems easily.
-            // Further details: https://issues.apache.org/jira/browse/SPARK-37710
-            processKryoException(ex, blockId)
-            throw ex
+          case t: Throwable =>
+            if (diskData != null) {
+              diskData.dispose()
Review Comment:
Disposing `BlockData` means releasing the memory occupied by the loaded
block that was just read from disk via `DiskStore.getBytes(...)`.
For that memory area we have just a single reference, `diskData`, which is
about to go out of scope, so we have to act now.
But strictly speaking this does nothing, as the current implementation of
`diskStore.getBytes(..)` gives back either an `EncryptedBlockData` or a
`DiskBlockData`:
https://github.com/apache/spark/blob/5049fd35f43b053ff1c7e8694762c1988e3a8330/core/src/main/scala/org/apache/spark/storage/DiskStore.scala#L119-L127
and the `dispose` method is empty in these cases:
- https://github.com/apache/spark/blob/5049fd35f43b053ff1c7e8694762c1988e3a8330/core/src/main/scala/org/apache/spark/storage/DiskStore.scala#L275
- https://github.com/apache/spark/blob/5049fd35f43b053ff1c7e8694762c1988e3a8330/core/src/main/scala/org/apache/spark/storage/DiskStore.scala#L221
Still, this is the correct way to do it: a future `BlockData` implementation
may hold resources that actually need to be freed.
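
To make the pattern concrete, here is a minimal, self-contained sketch of the
dispose-on-error idiom the diff introduces. These are not Spark's actual
classes: `SketchBlockData`, `FileBackedBlockData`, and `readBlock` are
hypothetical names standing in for `BlockData`, the disk-backed
implementations, and the `getLocalValues` disk branch.

```scala
import java.io.{ByteArrayInputStream, InputStream}

// Hypothetical stand-in for Spark's BlockData: dispose() may be a no-op.
trait SketchBlockData {
  def toInputStream(): InputStream
  def dispose(): Unit
}

// Mirrors DiskBlockData / EncryptedBlockData today: nothing to free.
final class FileBackedBlockData(bytes: Array[Byte]) extends SketchBlockData {
  override def toInputStream(): InputStream = new ByteArrayInputStream(bytes)
  override def dispose(): Unit = ()  // empty, like the two linked implementations
}

object DisposeOnErrorExample {
  // The defensive pattern from the diff: declare the reference outside
  // `try` and dispose it in the error path before rethrowing, because
  // it is the only reference to the loaded bytes.
  def readBlock(load: () => SketchBlockData): Iterator[Int] = {
    var data: SketchBlockData = null
    try {
      data = load()
      val in = data.toInputStream()
      Iterator.continually(in.read()).takeWhile(_ != -1)
    } catch {
      case t: Throwable =>
        if (data != null) {
          data.dispose()  // no-op today, but correct if dispose() ever frees memory
        }
        throw t
    }
  }

  def main(args: Array[String]): Unit = {
    val it = readBlock(() => new FileBackedBlockData(Array[Byte](1, 2, 3)))
    println(it.toList)  // List(1, 2, 3)
  }
}
```

The design point is that the caller must not rely on `dispose()` being empty;
for the two file-backed implementations linked above it happens to be a no-op,
which is exactly the observation here.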
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.