Github user cloud-fan commented on a diff in the pull request:
https://github.com/apache/spark/pull/22546#discussion_r220250120
--- Diff:
core/src/main/scala/org/apache/spark/util/io/ChunkedByteBuffer.scala ---
@@ -175,30 +174,32 @@ object ChunkedByteBuffer {
def fromManagedBuffer(data: ManagedBuffer, maxChunkSize: Int): ChunkedByteBuffer = {
data match {
case f: FileSegmentManagedBuffer =>
- map(f.getFile, maxChunkSize, f.getOffset, f.getLength)
+ fromFile(f.getFile, maxChunkSize, f.getOffset, f.getLength)
case other =>
new ChunkedByteBuffer(other.nioByteBuffer())
}
}
- def map(file: File, maxChunkSize: Int): ChunkedByteBuffer = {
- map(file, maxChunkSize, 0, file.length())
+ def fromFile(file: File, maxChunkSize: Int): ChunkedByteBuffer = {
+ fromFile(file, maxChunkSize, 0, file.length())
}
- def map(file: File, maxChunkSize: Int, offset: Long, length: Long): ChunkedByteBuffer = {
- Utils.tryWithResource(FileChannel.open(file.toPath, StandardOpenOption.READ)) { channel =>
- var remaining = length
- var pos = offset
- val chunks = new ListBuffer[ByteBuffer]()
- while (remaining > 0) {
- val chunkSize = math.min(remaining, maxChunkSize)
- val chunk = channel.map(FileChannel.MapMode.READ_ONLY, pos, chunkSize)
- pos += chunkSize
- remaining -= chunkSize
- chunks += chunk
- }
- new ChunkedByteBuffer(chunks.toArray)
+ def fromFile(file: File, maxChunkSize: Int, offset: Long, length: Long): ChunkedByteBuffer = {
--- End diff --
Should this overload be `private`, since external callers only need the other `fromFile` variants?
---
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]