ZanderXu commented on code in PR #4750:
URL: https://github.com/apache/hadoop/pull/4750#discussion_r975426391
##########
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java:
##########
@@ -2192,4 +2194,78 @@ public Void execute(FileSystem fs) throws IOException {
return null;
}
}
+
+ /**
+ * Executor that performs a getFileBlockLocations operation.
+ */
+
+ @InterfaceAudience.Private
+ @SuppressWarnings("rawtypes")
+ public static class FSFileBlockLocations
+ implements FileSystemAccess.FileSystemExecutor<Map> {
+ private Path path;
+ private long offsetValue;
+ private long lengthValue;
Review Comment:
These fields can be made final, e.g. `private final long lengthValue;`.
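A minimal sketch of the suggested change:
```
  public static class FSFileBlockLocations
      implements FileSystemAccess.FileSystemExecutor<Map> {
    // All three fields are assigned exactly once in the constructor,
    // so they can be declared final.
    private final Path path;
    private final long offsetValue;
    private final long lengthValue;

    public FSFileBlockLocations(String path, long offsetValue, long lengthValue) {
      this.path = new Path(path);
      this.offsetValue = offsetValue;
      this.lengthValue = lengthValue;
    }
  }
```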
##########
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java:
##########
@@ -370,7 +370,24 @@ public InputStream run() throws Exception {
break;
}
case GETFILEBLOCKLOCATIONS: {
- response = Response.status(Response.Status.BAD_REQUEST).build();
+ long offset = 0;
+ long len = Long.MAX_VALUE;
+ Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
+ Long lenParam = params.get(LenParam.NAME, LenParam.class);
+ AUDIT_LOG.info("[{}] offset [{}] len [{}]",
+ new Object[] {path, offsetParam, lenParam });
Review Comment:
Can change it to `AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);`; the explicit `Object[]` wrapper is unnecessary with SLF4J varargs.
##########
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java:
##########
@@ -2192,4 +2194,78 @@ public Void execute(FileSystem fs) throws IOException {
return null;
}
}
+
+ /**
+ * Executor that performs a getFileBlockLocations operation.
+ */
+
+ @InterfaceAudience.Private
+ @SuppressWarnings("rawtypes")
+ public static class FSFileBlockLocations
+ implements FileSystemAccess.FileSystemExecutor<Map> {
+ private Path path;
+ private long offsetValue;
+ private long lengthValue;
+
+ /**
+ * Creates a file-block-locations executor.
+ *
+ * @param path the path to retrieve the location
+ * @param offsetValue offset into the given file
+ * @param lengthValue length for which to get locations for
+ */
+ public FSFileBlockLocations(String path, long offsetValue,
+ long lengthValue) {
+ this.path = new Path(path);
+ this.offsetValue = offsetValue;
+ this.lengthValue = lengthValue;
+ }
+
+ @Override
+ public Map execute(FileSystem fs) throws IOException {
+ BlockLocation[] locations = fs.getFileBlockLocations(this.path,
+ this.offsetValue, this.lengthValue);
+ return JsonUtil.toJsonMap(locations);
+ }
+ }
+
+ /**
+ * Executor that performs a getFileBlockLocations operation for legacy
+ * clients that supports only GET_BLOCK_LOCATIONS.
+ */
+
+ @InterfaceAudience.Private
+ @SuppressWarnings("rawtypes")
+ public static class FSFileBlockLocationsLegacy
+ implements FileSystemAccess.FileSystemExecutor<Map> {
+ private Path path;
+ private long offsetValue;
+ private long lengthValue;
Review Comment:
Add `final` here too.
##########
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java:
##########
@@ -370,7 +370,24 @@ public InputStream run() throws Exception {
break;
}
case GETFILEBLOCKLOCATIONS: {
- response = Response.status(Response.Status.BAD_REQUEST).build();
+ long offset = 0;
+ long len = Long.MAX_VALUE;
+ Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
+ Long lenParam = params.get(LenParam.NAME, LenParam.class);
+ AUDIT_LOG.info("[{}] offset [{}] len [{}]",
+ new Object[] {path, offsetParam, lenParam });
+ if (offsetParam != null && offsetParam.longValue() > 0) {
+ offset = offsetParam.longValue();
+ }
+ if (lenParam != null && lenParam.longValue() > 0) {
+ len = lenParam.longValue();
+ }
Review Comment:
You can remove the `longValue()` calls and rely on auto-unboxing, such as:
```
if (offsetParam != null && offsetParam > 0) {
offset = offsetParam;
}
if (lenParam != null && lenParam > 0) {
len = lenParam;
}
```
##########
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java:
##########
@@ -127,6 +127,10 @@ public class HttpFSParametersProvider extends ParametersProvider {
PARAMS_DEF.put(Operation.GETECPOLICY, new Class[] {});
PARAMS_DEF.put(Operation.UNSETECPOLICY, new Class[] {});
PARAMS_DEF.put(Operation.SATISFYSTORAGEPOLICY, new Class[] {});
+ PARAMS_DEF.put(Operation.GETFILEBLOCKLOCATIONS,
+ new Class[] {OffsetParam.class, LenParam.class});
Review Comment:
The redundant `Operation.GETFILEBLOCKLOCATIONS` entry at line 63 can then be removed.
##########
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java:
##########
@@ -510,6 +527,26 @@ public InputStream run() throws Exception {
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
break;
}
+ case GET_BLOCK_LOCATIONS: {
Review Comment:
One question: how is this used? I didn't find a corresponding method to
`getFileBlockLocations` in `HttpFSFileSystem` for this op.
##########
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java:
##########
@@ -1882,18 +1884,59 @@ public BlockLocation[] getFileBlockLocations(final FileStatus status,
}
@Override
- public BlockLocation[] getFileBlockLocations(final Path p,
- final long offset, final long length) throws IOException {
+ public BlockLocation[] getFileBlockLocations(final Path p, final long offset,
+ final long length) throws IOException {
statistics.incrementReadOps(1);
storageStatistics.incrementOpCounter(OpType.GET_FILE_BLOCK_LOCATIONS);
+ BlockLocation[] locations = null;
Review Comment:
The redundant `null` initializer can be removed.
##########
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java:
##########
@@ -965,4 +968,53 @@ private static SnapshotStatus toSnapshotStatus(
SnapshotStatus.getParentPath(fullPath)));
return snapshotStatus;
}
+
+ @VisibleForTesting
+ public static BlockLocation[] toBlockLocationArray(Map<?, ?> json)
+ throws IOException {
+ final Map<?, ?> rootmap =
+ (Map<?, ?>) json.get(BlockLocation.class.getSimpleName() + "s");
Review Comment:
The appended `s` here is a bit confusing. How about changing it to the literal `"BlockLocations"`?
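For example:
```
final Map<?, ?> rootmap = (Map<?, ?>) json.get("BlockLocations");
```
This also matches the key the server side writes via `toJsonString("BlockLocations", m)`.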
##########
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java:
##########
@@ -1882,18 +1884,59 @@ public BlockLocation[] getFileBlockLocations(final FileStatus status,
}
@Override
- public BlockLocation[] getFileBlockLocations(final Path p,
- final long offset, final long length) throws IOException {
+ public BlockLocation[] getFileBlockLocations(final Path p, final long offset,
+ final long length) throws IOException {
statistics.incrementReadOps(1);
storageStatistics.incrementOpCounter(OpType.GET_FILE_BLOCK_LOCATIONS);
+ BlockLocation[] locations = null;
+ try {
+ if (isServerHCFSCompatible) {
+ locations =
+ getFileBlockLocations(GetOpParam.Op.GETFILEBLOCKLOCATIONS, p,
+                offset, length);
+ } else {
+ locations = getFileBlockLocations(GetOpParam.Op.GET_BLOCK_LOCATIONS, p,
+ offset, length);
+ }
+ } catch (RemoteException e) {
+ // parsing the exception is needed only if the client thinks the service
+ // is compatible
Review Comment:
Single line.
##########
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java:
##########
@@ -101,11 +112,13 @@
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
@RunWith(value = Parameterized.class)
public abstract class BaseTestHttpFSWith extends HFSTestCase {
-
+ public static final Logger LOG = LoggerFactory
+ .getLogger(BaseTestHttpFSWith.class);
Review Comment:
This logger is unused; maybe it can be removed.
##########
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java:
##########
@@ -1959,6 +1975,37 @@ public void testStoragePolicySatisfier() throws Exception {
}
}
+ private void testGetFileBlockLocations() throws Exception {
+ BlockLocation[] blockLocations;
+ Path testFile = null;
+ if (!this.isLocalFS()) {
+ FileSystem fs = this.getHttpFSFileSystem();
+ testFile = new Path(getProxiedFSTestDir(), "singleBlock.txt");
+ DFSTestUtil.createFile(fs, testFile, (long) 1, (short) 1, 0L);
+ if (fs instanceof HttpFSFileSystem) {
+ HttpFSFileSystem httpFS = (HttpFSFileSystem) fs;
+ blockLocations = httpFS.getFileBlockLocations(testFile, 0, 1);
+ assertNotNull(blockLocations);
Review Comment:
How about comparing it to the block locations obtained from `DistributedFileSystem`?
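An untested sketch of that comparison, assuming the backing cluster's `FileSystem` is reachable via `getProxiedFSConf()` as elsewhere in this test class:
```
FileSystem proxiedFs = FileSystem.get(getProxiedFSConf());
BlockLocation[] dfsLocations = proxiedFs.getFileBlockLocations(testFile, 0, 1);
assertEquals(dfsLocations.length, blockLocations.length);
for (int i = 0; i < dfsLocations.length; i++) {
  // toString() covers hosts, offset, length and the corrupt flag in one check
  assertEquals(dfsLocations[i].toString(), blockLocations[i].toString());
}
```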
##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtilClient.java:
##########
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web;
+
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.util.JsonSerialization;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+public class TestJsonUtilClient {
+ @Test
+ public void testToStringArray() throws Exception {
Review Comment:
The redundant `throws Exception` can be removed.
##########
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java:
##########
@@ -2003,4 +2006,40 @@ public void testContentType() throws Exception {
() -> HttpFSUtils.jsonParse(conn));
conn.disconnect();
}
+
+ @Test
+ @TestDir
+ @TestJetty
+ @TestHdfs
+ public void testGetFileBlockLocations() throws Exception {
+ createHttpFSServer(false, false);
+ // Create a test directory
+ String pathStr = "/tmp/tmp-get-block-location-test";
+ createDirWithHttp(pathStr, "700", null);
+
+ Path path = new Path(pathStr);
+ DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
+ .get(path.toUri(), TestHdfsHelper.getHdfsConf());
+
+ String file1 = pathStr + "/file1";
+ createWithHttp(file1, null);
+ HttpURLConnection conn = sendRequestToHttpFSServer(file1,
+ "GETFILEBLOCKLOCATIONS", "length=10&offset10");
+ Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+ BlockLocation[] locations1 =
+ dfs.getFileBlockLocations(new Path(file1), 0, 1);
+ Assert.assertNotNull(locations1);
+
+ Map<?, ?> jsonMap =
+        JsonSerialization.mapReader().readValue(conn.getInputStream());
+
+ BlockLocation[] httpfsBlockLocations =
+ JsonUtilClient.toBlockLocationArray(jsonMap);
Review Comment:
Single line.
##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java:
##########
@@ -707,4 +720,18 @@ public static String toJsonString(BlockLocation[] locations)
m.put(BlockLocation.class.getSimpleName(), blockLocations);
return toJsonString("BlockLocations", m);
}
+
+ public static Map<String, Object> toJsonMap(BlockLocation[] locations)
Review Comment:
Could lines 712 ~ 720 reuse this method? The only difference is the `Map` type,
`HashMap` vs. `TreeMap`. I think `HashMap` is fine, because there is only one key,
`BlockLocation`.
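A sketch of that reuse, assuming `toJsonMap` builds the same single-key map:
```
public static String toJsonString(BlockLocation[] locations) throws IOException {
  // delegate the map construction instead of duplicating it
  return toJsonString("BlockLocations", toJsonMap(locations));
}
```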
##########
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java:
##########
@@ -2192,4 +2194,78 @@ public Void execute(FileSystem fs) throws IOException {
return null;
}
}
+
+ /**
+ * Executor that performs a getFileBlockLocations operation.
+ */
+
+ @InterfaceAudience.Private
+ @SuppressWarnings("rawtypes")
+ public static class FSFileBlockLocations
+ implements FileSystemAccess.FileSystemExecutor<Map> {
+ private Path path;
+ private long offsetValue;
+ private long lengthValue;
+
+ /**
+ * Creates a file-block-locations executor.
+ *
+ * @param path the path to retrieve the location
+ * @param offsetValue offset into the given file
+ * @param lengthValue length for which to get locations for
+ */
+ public FSFileBlockLocations(String path, long offsetValue,
+ long lengthValue) {
+ this.path = new Path(path);
+ this.offsetValue = offsetValue;
+ this.lengthValue = lengthValue;
+ }
+
+ @Override
+ public Map execute(FileSystem fs) throws IOException {
+ BlockLocation[] locations = fs.getFileBlockLocations(this.path,
+ this.offsetValue, this.lengthValue);
+ return JsonUtil.toJsonMap(locations);
+ }
+ }
+
+ /**
+ * Executor that performs a getFileBlockLocations operation for legacy
+ * clients that supports only GET_BLOCK_LOCATIONS.
+ */
+
+ @InterfaceAudience.Private
+ @SuppressWarnings("rawtypes")
+ public static class FSFileBlockLocationsLegacy
+ implements FileSystemAccess.FileSystemExecutor<Map> {
+ private Path path;
+ private long offsetValue;
+ private long lengthValue;
+
+ /**
+ * Creates a file-block-locations executor.
+ *
+ * @param path the path to retrieve the location
+ * @param offsetValue offset into the given file
+ * @param lengthValue length for which to get locations for
+ */
+ public FSFileBlockLocationsLegacy(String path, long offsetValue,
+ long lengthValue) {
Review Comment:
Single line.
##########
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java:
##########
@@ -1882,18 +1884,59 @@ public BlockLocation[] getFileBlockLocations(final FileStatus status,
}
@Override
- public BlockLocation[] getFileBlockLocations(final Path p,
- final long offset, final long length) throws IOException {
+ public BlockLocation[] getFileBlockLocations(final Path p, final long offset,
+ final long length) throws IOException {
statistics.incrementReadOps(1);
storageStatistics.incrementOpCounter(OpType.GET_FILE_BLOCK_LOCATIONS);
+ BlockLocation[] locations = null;
+ try {
+ if (isServerHCFSCompatible) {
+ locations =
+ getFileBlockLocations(GetOpParam.Op.GETFILEBLOCKLOCATIONS, p,
+                offset, length);
+ } else {
+ locations = getFileBlockLocations(GetOpParam.Op.GET_BLOCK_LOCATIONS, p,
+ offset, length);
+ }
+ } catch (RemoteException e) {
+ // parsing the exception is needed only if the client thinks the service
+ // is compatible
+ if (isServerHCFSCompatible && isGetFileBlockLocationsException(e)) {
+ LOG.warn("Server does not appear to support GETFILEBLOCKLOCATIONS." +
+ "Fallback to the old GET_BLOCK_LOCATIONS. Exception: " +
+ e.getMessage());
+ isServerHCFSCompatible = false;
+ locations = getFileBlockLocations(GetOpParam.Op.GET_BLOCK_LOCATIONS, p,
+ offset, length);
Review Comment:
Here too, single line.
##########
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java:
##########
@@ -1882,18 +1884,59 @@ public BlockLocation[] getFileBlockLocations(final FileStatus status,
}
@Override
- public BlockLocation[] getFileBlockLocations(final Path p,
- final long offset, final long length) throws IOException {
+ public BlockLocation[] getFileBlockLocations(final Path p, final long offset,
+ final long length) throws IOException {
statistics.incrementReadOps(1);
storageStatistics.incrementOpCounter(OpType.GET_FILE_BLOCK_LOCATIONS);
+ BlockLocation[] locations = null;
+ try {
+ if (isServerHCFSCompatible) {
+ locations =
Review Comment:
Single line. Also, the maximum line length has changed from 80 to 100.
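With the 100-column limit, each call fits on one line:
```
      if (isServerHCFSCompatible) {
        locations = getFileBlockLocations(GetOpParam.Op.GETFILEBLOCKLOCATIONS, p, offset, length);
      } else {
        locations = getFileBlockLocations(GetOpParam.Op.GET_BLOCK_LOCATIONS, p, offset, length);
      }
```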
##########
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java:
##########
@@ -2192,4 +2194,78 @@ public Void execute(FileSystem fs) throws IOException {
return null;
}
}
+
+ /**
+ * Executor that performs a getFileBlockLocations operation.
+ */
+
+ @InterfaceAudience.Private
+ @SuppressWarnings("rawtypes")
+ public static class FSFileBlockLocations
+ implements FileSystemAccess.FileSystemExecutor<Map> {
+ private Path path;
+ private long offsetValue;
+ private long lengthValue;
+
+ /**
+ * Creates a file-block-locations executor.
+ *
+ * @param path the path to retrieve the location
+ * @param offsetValue offset into the given file
+ * @param lengthValue length for which to get locations for
+ */
+ public FSFileBlockLocations(String path, long offsetValue,
+ long lengthValue) {
Review Comment:
Single line.
##########
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java:
##########
@@ -965,4 +968,53 @@ private static SnapshotStatus toSnapshotStatus(
SnapshotStatus.getParentPath(fullPath)));
return snapshotStatus;
}
+
+ @VisibleForTesting
+ public static BlockLocation[] toBlockLocationArray(Map<?, ?> json)
+ throws IOException {
+ final Map<?, ?> rootmap =
+ (Map<?, ?>) json.get(BlockLocation.class.getSimpleName() + "s");
+ final List<?> array =
+ JsonUtilClient.getList(rootmap, BlockLocation.class.getSimpleName());
+ Preconditions.checkNotNull(array);
+ final BlockLocation[] locations = new BlockLocation[array.size()];
+ int i = 0;
+ for (Object object : array) {
+ final Map<?, ?> m = (Map<?, ?>) object;
+ locations[i++] = JsonUtilClient.toBlockLocation(m);
+ }
+ return locations;
+ }
+
+ /** Convert a Json map to BlockLocation. **/
+  private static BlockLocation toBlockLocation(Map<?, ?> m) throws IOException {
+ if (m == null) {
+ return null;
+ }
+ long length = ((Number) m.get("length")).longValue();
+ long offset = ((Number) m.get("offset")).longValue();
+ boolean corrupt = Boolean.getBoolean(m.get("corrupt").toString());
+ String[] storageIds = toStringArray(getList(m, "storageIds"));
Review Comment:
Maybe there is no `storageIds` key, because `Map<String, Object> toJsonMap(final
BlockLocation blockLocation)` doesn't serialize `storageIds`.
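If `storageIds` is meant to round-trip, the server-side serializer would also need something like this (hypothetical; the key name is taken from the deserializer above):
```
// In JsonUtil.toJsonMap(BlockLocation): emit the storage IDs so that
// JsonUtilClient.toBlockLocation() can read them back.
m.put("storageIds", blockLocation.getStorageIds());
```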
##########
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java:
##########
@@ -1882,18 +1884,59 @@ public BlockLocation[] getFileBlockLocations(final FileStatus status,
}
@Override
- public BlockLocation[] getFileBlockLocations(final Path p,
- final long offset, final long length) throws IOException {
+ public BlockLocation[] getFileBlockLocations(final Path p, final long offset,
+ final long length) throws IOException {
statistics.incrementReadOps(1);
storageStatistics.incrementOpCounter(OpType.GET_FILE_BLOCK_LOCATIONS);
+ BlockLocation[] locations = null;
+ try {
+ if (isServerHCFSCompatible) {
+ locations =
+ getFileBlockLocations(GetOpParam.Op.GETFILEBLOCKLOCATIONS, p,
+                offset, length);
+ } else {
+ locations = getFileBlockLocations(GetOpParam.Op.GET_BLOCK_LOCATIONS, p,
+ offset, length);
+ }
+ } catch (RemoteException e) {
+ // parsing the exception is needed only if the client thinks the service
+ // is compatible
+ if (isServerHCFSCompatible && isGetFileBlockLocationsException(e)) {
+ LOG.warn("Server does not appear to support GETFILEBLOCKLOCATIONS." +
+ "Fallback to the old GET_BLOCK_LOCATIONS. Exception: " +
+ e.getMessage());
+ isServerHCFSCompatible = false;
+ locations = getFileBlockLocations(GetOpParam.Op.GET_BLOCK_LOCATIONS, p,
+ offset, length);
+ } else {
+ throw e;
+ }
+ }
+ return locations;
+ }
- final HttpOpParam.Op op = GetOpParam.Op.GET_BLOCK_LOCATIONS;
+ private boolean isGetFileBlockLocationsException(RemoteException e) {
+ return e.getMessage() != null
+ && e.getMessage().contains("Invalid value for webhdfs parameter")
+ && e.getMessage()
+ .contains(GetOpParam.Op.GETFILEBLOCKLOCATIONS.toString());
Review Comment:
Single line.
##########
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java:
##########
@@ -1882,18 +1884,59 @@ public BlockLocation[] getFileBlockLocations(final FileStatus status,
}
@Override
- public BlockLocation[] getFileBlockLocations(final Path p,
- final long offset, final long length) throws IOException {
+ public BlockLocation[] getFileBlockLocations(final Path p, final long offset,
+ final long length) throws IOException {
statistics.incrementReadOps(1);
storageStatistics.incrementOpCounter(OpType.GET_FILE_BLOCK_LOCATIONS);
+ BlockLocation[] locations = null;
+ try {
+ if (isServerHCFSCompatible) {
+ locations =
+ getFileBlockLocations(GetOpParam.Op.GETFILEBLOCKLOCATIONS, p,
+                offset, length);
+ } else {
+ locations = getFileBlockLocations(GetOpParam.Op.GET_BLOCK_LOCATIONS, p,
+ offset, length);
+ }
+ } catch (RemoteException e) {
+ // parsing the exception is needed only if the client thinks the service
+ // is compatible
+ if (isServerHCFSCompatible && isGetFileBlockLocationsException(e)) {
+ LOG.warn("Server does not appear to support GETFILEBLOCKLOCATIONS." +
+ "Fallback to the old GET_BLOCK_LOCATIONS. Exception: " +
+ e.getMessage());
+ isServerHCFSCompatible = false;
+ locations = getFileBlockLocations(GetOpParam.Op.GET_BLOCK_LOCATIONS, p,
+ offset, length);
+ } else {
+ throw e;
+ }
+ }
+ return locations;
+ }
- final HttpOpParam.Op op = GetOpParam.Op.GET_BLOCK_LOCATIONS;
+ private boolean isGetFileBlockLocationsException(RemoteException e) {
+ return e.getMessage() != null
+ && e.getMessage().contains("Invalid value for webhdfs parameter")
+ && e.getMessage()
+ .contains(GetOpParam.Op.GETFILEBLOCKLOCATIONS.toString());
+ }
+
+ private BlockLocation[] getFileBlockLocations(GetOpParam.Op operation,
+ final Path p, final long offset, final long length) throws IOException {
+ final HttpOpParam.Op op = operation;
return new FsPathResponseRunner<BlockLocation[]>(op, p,
new OffsetParam(offset), new LengthParam(length)) {
@Override
- BlockLocation[] decodeResponse(Map<?,?> json) throws IOException {
- return DFSUtilClient.locatedBlocks2Locations(
- JsonUtilClient.toLocatedBlocks(json));
+ BlockLocation[] decodeResponse(Map<?, ?> json) throws IOException {
+ switch (operation) {
+ case GETFILEBLOCKLOCATIONS:
Review Comment:
Indent by two spaces?
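For example (the `GET_BLOCK_LOCATIONS` and `default` arms below are a guess based on the removed code):
```
switch (operation) {
  case GETFILEBLOCKLOCATIONS:
    return JsonUtilClient.toBlockLocationArray(json);
  case GET_BLOCK_LOCATIONS:
    return DFSUtilClient.locatedBlocks2Locations(
        JsonUtilClient.toLocatedBlocks(json));
  default:
    throw new IOException("Unknown operation " + operation.name());
}
```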
##########
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java:
##########
@@ -2003,4 +2006,40 @@ public void testContentType() throws Exception {
() -> HttpFSUtils.jsonParse(conn));
conn.disconnect();
}
+
+ @Test
+ @TestDir
+ @TestJetty
+ @TestHdfs
+ public void testGetFileBlockLocations() throws Exception {
+ createHttpFSServer(false, false);
+ // Create a test directory
+ String pathStr = "/tmp/tmp-get-block-location-test";
+ createDirWithHttp(pathStr, "700", null);
+
+ Path path = new Path(pathStr);
+ DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
+ .get(path.toUri(), TestHdfsHelper.getHdfsConf());
+
+ String file1 = pathStr + "/file1";
+ createWithHttp(file1, null);
+ HttpURLConnection conn = sendRequestToHttpFSServer(file1,
+ "GETFILEBLOCKLOCATIONS", "length=10&offset10");
+ Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+ BlockLocation[] locations1 =
+ dfs.getFileBlockLocations(new Path(file1), 0, 1);
Review Comment:
Single line.
##########
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java:
##########
@@ -1882,18 +1884,59 @@ public BlockLocation[] getFileBlockLocations(final FileStatus status,
}
@Override
- public BlockLocation[] getFileBlockLocations(final Path p,
- final long offset, final long length) throws IOException {
+ public BlockLocation[] getFileBlockLocations(final Path p, final long offset,
+ final long length) throws IOException {
statistics.incrementReadOps(1);
storageStatistics.incrementOpCounter(OpType.GET_FILE_BLOCK_LOCATIONS);
+ BlockLocation[] locations = null;
+ try {
+ if (isServerHCFSCompatible) {
+ locations =
+ getFileBlockLocations(GetOpParam.Op.GETFILEBLOCKLOCATIONS, p,
+                offset, length);
+ } else {
+ locations = getFileBlockLocations(GetOpParam.Op.GET_BLOCK_LOCATIONS, p,
+ offset, length);
Review Comment:
Single line.
##########
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java:
##########
@@ -18,7 +18,11 @@
package org.apache.hadoop.fs.http.client;
+import okhttp3.mockwebserver.MockResponse;
+import okhttp3.mockwebserver.MockWebServer;
+import okhttp3.mockwebserver.RecordedRequest;
Review Comment:
Unused?
##########
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java:
##########
@@ -1959,6 +1975,37 @@ public void testStoragePolicySatisfier() throws Exception {
}
}
+ private void testGetFileBlockLocations() throws Exception {
+ BlockLocation[] blockLocations;
+ Path testFile = null;
+ if (!this.isLocalFS()) {
+ FileSystem fs = this.getHttpFSFileSystem();
+ testFile = new Path(getProxiedFSTestDir(), "singleBlock.txt");
+ DFSTestUtil.createFile(fs, testFile, (long) 1, (short) 1, 0L);
+ if (fs instanceof HttpFSFileSystem) {
+ HttpFSFileSystem httpFS = (HttpFSFileSystem) fs;
+ blockLocations = httpFS.getFileBlockLocations(testFile, 0, 1);
+ assertNotNull(blockLocations);
+
+ // verify HttpFSFileSystem.toBlockLocations()
+ String jsonString = JsonUtil.toJsonString(blockLocations);
+ JSONParser parser = new JSONParser();
+ JSONObject jsonObject = (JSONObject) parser.parse(jsonString,
+            (ContainerFactory) null);
+ BlockLocation[] deserializedLocation =
+            HttpFSFileSystem.toBlockLocations(jsonObject);
+ assertEquals(blockLocations.length, deserializedLocation.length);
+ for (int i = 0; i < blockLocations.length; i++) {
+ assertEquals(blockLocations[i].toString(),
+              deserializedLocation[i].toString());
+ }
+ } else if (fs instanceof WebHdfsFileSystem) {
+ WebHdfsFileSystem webHdfsFileSystem = (WebHdfsFileSystem) fs;
+ blockLocations = webHdfsFileSystem.getFileBlockLocations(testFile, 0,
+            1);
+ assertNotNull(blockLocations);
Review Comment:
Here too: compare with the `DistributedFileSystem` result.
##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtilClient.java:
##########
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web;
+
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.util.JsonSerialization;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+public class TestJsonUtilClient {
+ @Test
+ public void testToStringArray() throws Exception {
+ List<String> strList = new ArrayList<String>(
+ Arrays.asList("aaa", "bbb", "ccc"));
Review Comment:
Single line.
##########
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java:
##########
@@ -2003,4 +2006,40 @@ public void testContentType() throws Exception {
() -> HttpFSUtils.jsonParse(conn));
conn.disconnect();
}
+
+ @Test
+ @TestDir
+ @TestJetty
+ @TestHdfs
+ public void testGetFileBlockLocations() throws Exception {
+ createHttpFSServer(false, false);
+ // Create a test directory
+ String pathStr = "/tmp/tmp-get-block-location-test";
+ createDirWithHttp(pathStr, "700", null);
+
+ Path path = new Path(pathStr);
+ DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
+ .get(path.toUri(), TestHdfsHelper.getHdfsConf());
+
+ String file1 = pathStr + "/file1";
+ createWithHttp(file1, null);
+ HttpURLConnection conn = sendRequestToHttpFSServer(file1,
+ "GETFILEBLOCKLOCATIONS", "length=10&offset10");
+ Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+ BlockLocation[] locations1 =
+ dfs.getFileBlockLocations(new Path(file1), 0, 1);
+ Assert.assertNotNull(locations1);
+
+ Map<?, ?> jsonMap =
+        JsonSerialization.mapReader().readValue(conn.getInputStream());
+
+ BlockLocation[] httpfsBlockLocations =
+ JsonUtilClient.toBlockLocationArray(jsonMap);
+
+ assertEquals(locations1.length, httpfsBlockLocations.length);
+ for (int i = 0; i < locations1.length; i++) {
+ assertEquals(locations1.toString(), httpfsBlockLocations.toString());
+ }
+
+ conn.getInputStream().close();
Review Comment:
Do this in a `finally` block to ensure `conn` is always closed?
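A sketch of that shape (`params` stands in for the actual query string):
```
HttpURLConnection conn = sendRequestToHttpFSServer(file1,
    "GETFILEBLOCKLOCATIONS", params);
try {
  // read conn.getInputStream() and run the assertions here
} finally {
  // runs even when an assertion above fails
  conn.disconnect();
}
```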
##########
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java:
##########
@@ -2003,4 +2006,40 @@ public void testContentType() throws Exception {
() -> HttpFSUtils.jsonParse(conn));
conn.disconnect();
}
+
+ @Test
+ @TestDir
+ @TestJetty
+ @TestHdfs
+ public void testGetFileBlockLocations() throws Exception {
+ createHttpFSServer(false, false);
+ // Create a test directory
+ String pathStr = "/tmp/tmp-get-block-location-test";
+ createDirWithHttp(pathStr, "700", null);
+
+ Path path = new Path(pathStr);
+ DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
+ .get(path.toUri(), TestHdfsHelper.getHdfsConf());
+
+ String file1 = pathStr + "/file1";
+ createWithHttp(file1, null);
+ HttpURLConnection conn = sendRequestToHttpFSServer(file1,
+ "GETFILEBLOCKLOCATIONS", "length=10&offset10");
+ Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+ BlockLocation[] locations1 =
+ dfs.getFileBlockLocations(new Path(file1), 0, 1);
+ Assert.assertNotNull(locations1);
+
+ Map<?, ?> jsonMap =
+        JsonSerialization.mapReader().readValue(conn.getInputStream());
+
+ BlockLocation[] httpfsBlockLocations =
+ JsonUtilClient.toBlockLocationArray(jsonMap);
+
+ assertEquals(locations1.length, httpfsBlockLocations.length);
+ for (int i = 0; i < locations1.length; i++) {
+ assertEquals(locations1.toString(), httpfsBlockLocations.toString());
Review Comment:
Maybe this should be `assertEquals(locations1[i].toString(), httpfsBlockLocations[i].toString());`?