[
https://issues.apache.org/jira/browse/HADOOP-17347?focusedWorklogId=522807&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-522807
]
ASF GitHub Bot logged work on HADOOP-17347:
-------------------------------------------
Author: ASF GitHub Bot
Created on: 10/Dec/20 17:22
Start Date: 10/Dec/20 17:22
Worklog Time Spent: 10m
Work Description: bilaharith commented on a change in pull request #2464:
URL: https://github.com/apache/hadoop/pull/2464#discussion_r540354018
##########
File path:
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java
##########
@@ -224,6 +240,123 @@ private int readOneBlock(final byte[] b, final int off,
final int len) throws IO
return bytesToRead;
}
+ private boolean shouldReadFully() {
+ return this.firstRead && this.context.readSmallFilesCompletely()
+ && this.contentLength <= this.bufferSize;
+ }
+
+ private boolean shouldReadLastBlock(int len) {
+ return this.firstRead && this.context.optimizeFooterRead()
+ && len == FOOTER_SIZE
+ && this.fCursor == this.contentLength - FOOTER_SIZE;
+ }
+
+ private int readFileCompletely(final byte[] b, final int off, final int len)
+ throws IOException {
+ if (closed) {
+ throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+ }
+
+ Preconditions.checkNotNull(b);
+ LOG.debug("read one block requested b.length = {} off {} len {}", b.length,
+ off, len);
+
+ if (len == 0) {
+ return 0;
+ }
+
+ if (this.available() == 0) {
+ return -1;
+ }
+
+ if (off < 0 || len < 0 || len > b.length - off) {
+ throw new IndexOutOfBoundsException();
+ }
+
+ buffer = new byte[bufferSize];
+ // data needs to be copied to user buffer from index bCursor, bCursor has
+ // to be the current fCursor
+ bCursor = (int) fCursor;
+ fCursorAfterLastRead = fCursor;
+ int totalBytesRead = 0;
+ int loopCount = 0;
+ // Read from beginning
+ fCursor = 0;
+ while (fCursor < contentLength) {
+ int bytesRead = readInternal(fCursor, buffer, limit,
+ (int) contentLength - limit, true);
+ if (bytesRead > 0) {
+ totalBytesRead += bytesRead;
+ limit += bytesRead;
+ fCursor += bytesRead;
+ }
+ if (loopCount++ >= 10) {
Review comment:
Made changes the following way
1. The available data is returned
2. Done
3. Done
4. Done
##########
File path:
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java
##########
@@ -224,6 +240,123 @@ private int readOneBlock(final byte[] b, final int off,
final int len) throws IO
return bytesToRead;
}
+ private boolean shouldReadFully() {
+ return this.firstRead && this.context.readSmallFilesCompletely()
+ && this.contentLength <= this.bufferSize;
+ }
+
+ private boolean shouldReadLastBlock(int len) {
+ return this.firstRead && this.context.optimizeFooterRead()
+ && len == FOOTER_SIZE
+ && this.fCursor == this.contentLength - FOOTER_SIZE;
+ }
+
+ private int readFileCompletely(final byte[] b, final int off, final int len)
+ throws IOException {
+ if (closed) {
+ throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+ }
+
+ Preconditions.checkNotNull(b);
+ LOG.debug("read one block requested b.length = {} off {} len {}", b.length,
+ off, len);
+
+ if (len == 0) {
+ return 0;
+ }
+
+ if (this.available() == 0) {
+ return -1;
+ }
+
+ if (off < 0 || len < 0 || len > b.length - off) {
+ throw new IndexOutOfBoundsException();
+ }
+
+ buffer = new byte[bufferSize];
+ // data needs to be copied to user buffer from index bCursor, bCursor has
+ // to be the current fCursor
+ bCursor = (int) fCursor;
+ fCursorAfterLastRead = fCursor;
+ int totalBytesRead = 0;
+ int loopCount = 0;
+ // Read from beginning
+ fCursor = 0;
+ while (fCursor < contentLength) {
+ int bytesRead = readInternal(fCursor, buffer, limit,
+ (int) contentLength - limit, true);
+ if (bytesRead > 0) {
+ totalBytesRead += bytesRead;
+ limit += bytesRead;
+ fCursor += bytesRead;
+ }
+ if (loopCount++ >= 10) {
+ throw new IOException(
+ "Too many attempts in reading whole file " + path);
+ }
+ }
+ firstRead = false;
+ if (totalBytesRead == -1) {
+ return -1;
+ }
+ return copyToUserBuffer(b, off, len);
+ }
+
+ private int readLastBlock(final byte[] b, final int off, final int len)
+ throws IOException {
+ if (closed) {
+ throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+ }
+
+ Preconditions.checkNotNull(b);
+ LOG.debug("read one block requested b.length = {} off {} len {}", b.length,
+ off, len);
+
+ if (len == 0) {
+ return 0;
+ }
+
+ if (this.available() == 0) {
+ return -1;
+ }
+
+ if (off < 0 || len < 0 || len > b.length - off) {
+ throw new IndexOutOfBoundsException();
+ }
+
+ buffer = new byte[bufferSize];
+ // data needs to be copied to user buffer from index bCursor, for small
+ // files the bCursor will be contentLength - footer size,
+ // otherwise bufferSize - footer size
+ bCursor = (int) (Math.min(contentLength, bufferSize) - FOOTER_SIZE);
+ // read API call is considered 1 single operation; in reality the server could
+ // return partial data and the client has to retry until the last full block
+ // is read. So setting the fCursorAfterLastRead before the possible
+ // multiple server calls
+ fCursorAfterLastRead = fCursor;
+ // 0 if contentLength is < bufferSize
+ fCursor = Math.max(0, contentLength - bufferSize);
+ int totalBytesRead = 0;
+ int loopCount = 0;
+ while (fCursor < contentLength) {
+ int bytesRead = readInternal(fCursor, buffer, limit, bufferSize - limit,
+ true);
+ if (bytesRead > 0) {
+ totalBytesRead += bytesRead;
+ limit += bytesRead;
+ fCursor += bytesRead;
+ }
+ if (loopCount++ >= 10) {
Review comment:
Done
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
Issue Time Tracking
-------------------
Worklog Id: (was: 522807)
Time Spent: 4h 20m (was: 4h 10m)
> ABFS: Read optimizations
> ------------------------
>
> Key: HADOOP-17347
> URL: https://issues.apache.org/jira/browse/HADOOP-17347
> Project: Hadoop Common
> Issue Type: Sub-task
> Components: fs/azure
> Affects Versions: 3.4.0
> Reporter: Bilahari T H
> Assignee: Bilahari T H
> Priority: Major
> Labels: pull-request-available
> Time Spent: 4h 20m
> Remaining Estimate: 0h
>
> Optimize read performance for the following scenarios
> # Read small files completely
> Files that are of size smaller than the read buffer size can be considered
> as small files. In case of such files it would be better to read the full
> file into the AbfsInputStream buffer.
> # Read last block if the read is for footer
> If the read is for the last 8 bytes, read the full file.
> This will optimize reads for parquet files. [Parquet file
> format|https://www.ellicium.com/parquet-file-format-structure/]
> Both these optimizations will be present under configs as follows
> # fs.azure.read.smallfilescompletely
> # fs.azure.read.optimizefooterread
--
This message was sent by Atlassian Jira
(v8.3.4#803005)
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]