This is an automated email from the ASF dual-hosted git repository.

ggregory pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/commons-io.git

The following commit(s) were added to refs/heads/master by this push:
     new 8cbe373  [IO-510] Add and adapt ReadAheadInputStream and BufferedFileChannelInputStream from Apache Spark.
8cbe373 is described below

commit 8cbe3734f83b9d760eaf07c03d19348327efa14a
Author: Gary Gregory <gardgreg...@gmail.com>
AuthorDate: Thu Oct 29 21:27:21 2020 -0400

    [IO-510] Add and adapt ReadAheadInputStream and BufferedFileChannelInputStream from Apache Spark.

    Javadoc and better method names.
---
 .../io/input/BufferedFileChannelInputStream.java   | 34 ++++++++++++----------
 .../commons/io/input/ReadAheadInputStream.java     |  3 ++
 2 files changed, 22 insertions(+), 15 deletions(-)

diff --git a/src/main/java/org/apache/commons/io/input/BufferedFileChannelInputStream.java b/src/main/java/org/apache/commons/io/input/BufferedFileChannelInputStream.java
index 8825a51..5443340 100644
--- a/src/main/java/org/apache/commons/io/input/BufferedFileChannelInputStream.java
+++ b/src/main/java/org/apache/commons/io/input/BufferedFileChannelInputStream.java
@@ -98,12 +98,29 @@ public final class BufferedFileChannelInputStream extends InputStream {
     }
 
     /**
+     * Attempts to clean up a ByteBuffer if it is direct or memory-mapped. This uses an *unsafe* Sun API that will cause
+     * errors if one attempts to read from the disposed buffer. However, neither the bytes allocated to direct buffers
+     * nor file descriptors opened for memory-mapped buffers put pressure on the garbage collector. Waiting for garbage
+     * collection may lead to the depletion of off-heap memory or huge numbers of open files. There's unfortunately no
+     * standard API to manually dispose of these kinds of buffers.
+     *
+     * @param buffer the buffer to clean.
+     */
+    private void clean(final ByteBuffer buffer) {
+        if (buffer instanceof sun.nio.ch.DirectBuffer) {
+            clean((sun.nio.ch.DirectBuffer) buffer);
+        }
+    }
+
+    /**
      * In Java 8, the type of DirectBuffer.cleaner() was sun.misc.Cleaner, and it was possible to access the method
      * sun.misc.Cleaner.clean() to invoke it. The type changed to jdk.internal.ref.Cleaner in later JDKs, and the
      * .clean() method is not accessible even with reflection. However sun.misc.Unsafe added a invokeCleaner() method in
      * JDK 9+ and this is still accessible with reflection.
+     *
+     * @param buffer the buffer to clean.
      */
-    private void bufferCleaner(final DirectBuffer buffer) {
+    private void clean(final DirectBuffer buffer) {
         //
         // Ported from StorageUtils.scala.
         //
@@ -159,20 +176,7 @@ public final class BufferedFileChannelInputStream extends InputStream {
         try {
             fileChannel.close();
         } finally {
-            dispose(byteBuffer);
-        }
-    }
-
-    /**
-     * Attempts to clean up a ByteBuffer if it is direct or memory-mapped. This uses an *unsafe* Sun API that will cause
-     * errors if one attempts to read from the disposed buffer. However, neither the bytes allocated to direct buffers
-     * nor file descriptors opened for memory-mapped buffers put pressure on the garbage collector. Waiting for garbage
-     * collection may lead to the depletion of off-heap memory or huge numbers of open files. There's unfortunately no
-     * standard API to manually dispose of these kinds of buffers.
-     */
-    private void dispose(final ByteBuffer buffer) {
-        if (buffer instanceof sun.nio.ch.DirectBuffer) {
-            bufferCleaner((sun.nio.ch.DirectBuffer) buffer);
+            clean(byteBuffer);
         }
     }
 
diff --git a/src/main/java/org/apache/commons/io/input/ReadAheadInputStream.java b/src/main/java/org/apache/commons/io/input/ReadAheadInputStream.java
index 9f6c7f0..ef1c58b 100644
--- a/src/main/java/org/apache/commons/io/input/ReadAheadInputStream.java
+++ b/src/main/java/org/apache/commons/io/input/ReadAheadInputStream.java
@@ -393,6 +393,9 @@ public class ReadAheadInputStream extends InputStream {
     /**
      * Internal skip function which should be called only from skip(). The assumption is that the stateChangeLock is
      * already acquired in the caller before calling this function.
+     *
+     * @param n the number of bytes to be skipped.
+     * @return the actual number of bytes skipped.
      */
     private long skipInternal(final long n) throws IOException {
         assert stateChangeLock.isLocked();
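
Editor's note: the Javadoc on clean(DirectBuffer) above summarizes why freeing a direct or memory-mapped buffer needs different tricks per JDK: sun.misc.Cleaner on Java 8 versus sun.misc.Unsafe.invokeCleaner(ByteBuffer) on JDK 9+. The sketch below illustrates only the JDK 9+ reflection route that the Javadoc describes; it is not the code added by this commit, and the class and method names are made up for the example.

import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.nio.ByteBuffer;

// Illustrative sketch of the JDK 9+ approach described in the Javadoc above.
public class InvokeCleanerSketch {

    // Frees the off-heap memory backing a direct or memory-mapped buffer.
    // Reading from the buffer afterwards is undefined behavior, as the Javadoc warns.
    static void freeDirectBuffer(final ByteBuffer buffer) throws ReflectiveOperationException {
        if (!buffer.isDirect()) {
            return; // heap buffers are reclaimed normally by the garbage collector
        }
        final Class<?> unsafeClass = Class.forName("sun.misc.Unsafe");
        final Field theUnsafeField = unsafeClass.getDeclaredField("theUnsafe");
        theUnsafeField.setAccessible(true);
        final Object theUnsafe = theUnsafeField.get(null);
        // sun.misc.Unsafe.invokeCleaner(ByteBuffer) exists on JDK 9+ only;
        // on Java 8 this lookup fails with NoSuchMethodException.
        final Method invokeCleaner = unsafeClass.getMethod("invokeCleaner", ByteBuffer.class);
        invokeCleaner.invoke(theUnsafe, buffer);
    }

    public static void main(final String[] args) throws ReflectiveOperationException {
        final ByteBuffer buffer = ByteBuffer.allocateDirect(4096);
        // ... use the buffer ...
        freeDirectBuffer(buffer); // release the off-heap memory without waiting for GC
    }
}

On Java 8 the getMethod("invokeCleaner", ...) lookup throws NoSuchMethodException, which is presumably why the Javadoc also mentions the older sun.misc.Cleaner route.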