Github user cloud-fan commented on a diff in the pull request:
https://github.com/apache/spark/pull/18855#discussion_r133384047
--- Diff: core/src/main/scala/org/apache/spark/storage/DiskStore.scala ---
@@ -165,6 +149,64 @@ private[spark] class DiskStore(
}
+private class DiskBlockData(
+ minMemoryMapBytes: Long,
+ maxMemoryMapBytes: Long,
+ file: File,
+ blockSize: Long) extends BlockData {
+
+ override def toInputStream(): InputStream = new FileInputStream(file)
+
+ /**
+ * Returns a Netty-friendly wrapper for the block's data.
+ *
+ * Please see `ManagedBuffer.convertToNetty()` for more details.
+ */
+ override def toNetty(): AnyRef = new DefaultFileRegion(file, 0, size)
+
+ override def toChunkedByteBuffer(allocator: (Int) => ByteBuffer): ChunkedByteBuffer = {
+ Utils.tryWithResource(open()) { channel =>
+ var remaining = blockSize
+ val chunks = new ListBuffer[ByteBuffer]()
+ while (remaining > 0) {
+ val chunkSize = math.min(remaining, maxMemoryMapBytes)
+ val chunk = allocator(chunkSize.toInt)
+ remaining -= chunkSize
+ JavaUtils.readFully(channel, chunk)
+ chunk.flip()
+ chunks += chunk
+ }
+ new ChunkedByteBuffer(chunks.toArray)
+ }
+ }
+
+ override def toByteBuffer(): ByteBuffer = {
+ // I chose to leave the original error message here
+ // since users are unfamiliar with the configuration key
+ // controlling maxMemoryMapBytes for tests
+ require(blockSize < maxMemoryMapBytes,
--- End diff --
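
As a side note on the loop above, here is a minimal, I/O-free sketch of the chunking arithmetic in `toChunkedByteBuffer` (the `chunkSizes` helper is illustrative and not part of the PR):

```scala
import scala.collection.mutable.ListBuffer

// Illustrative helper, not from the PR: reproduces the chunk-size
// arithmetic of toChunkedByteBuffer without touching any file.
def chunkSizes(blockSize: Long, maxMemoryMapBytes: Long): List[Int] = {
  val chunks = new ListBuffer[Int]()
  var remaining = blockSize
  while (remaining > 0) {
    // the last chunk may be smaller than the cap
    val chunkSize = math.min(remaining, maxMemoryMapBytes)
    // toInt is safe only because the allocator takes an Int,
    // which forces maxMemoryMapBytes <= Int.MaxValue
    chunks += chunkSize.toInt
    remaining -= chunkSize
  }
  chunks.toList
}

// e.g. a 5000-byte block with a 2048-byte cap yields three buffers:
// chunkSizes(5000L, 2048L) == List(2048, 2048, 904)
```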
Why do we need this? I think we would still see the original error message
without this check, once we go down the memory-map code path.
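
For context, the failure on the memory-map path is a JVM-level limit: `FileChannel.map` cannot create a `MappedByteBuffer` larger than `Int.MaxValue` bytes. A minimal sketch of that failure mode (the file path is hypothetical):

```scala
import java.io.RandomAccessFile
import java.nio.channels.FileChannel.MapMode

// Illustrative only: mapping a region larger than Int.MaxValue bytes
// is rejected by FileChannel.map itself, independent of any require()
// in DiskStore.
val raf = new RandomAccessFile("/tmp/some-block", "r") // hypothetical path
try {
  // for raf.length() > Int.MaxValue this throws
  // IllegalArgumentException: Size exceeds Integer.MAX_VALUE
  val mapped = raf.getChannel.map(MapMode.READ_ONLY, 0, raf.length())
} finally {
  raf.close()
}
```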