jojochuang commented on a change in pull request #3322:
URL: https://github.com/apache/hadoop/pull/3322#discussion_r695375296



##########
File path: hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
##########
@@ -908,4 +911,51 @@ private static SnapshotStatus toSnapshotStatus(
                 SnapshotStatus.getParentPath(fullPath)));
     return snapshotStatus;
   }
+
+  static BlockLocation[] toBlockLocationArray(Map<?, ?> json)

Review comment:
       TODO: this needs a unit test.
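
       A minimal round-trip sketch of such a test (it would need to live in
       org.apache.hadoop.hdfs.web to reach the package-private method; the
       json-simple JSONParser and the JsonUtil.toJsonString(BlockLocation[])
       overload used elsewhere in this PR are the assumed pieces):

           import java.util.Map;

           import org.apache.hadoop.fs.BlockLocation;
           import org.json.simple.parser.JSONParser;
           import org.junit.Test;

           import static org.junit.Assert.assertEquals;

           @Test
           public void testToBlockLocationArray() throws Exception {
             // Serialize a known location, parse the JSON back into a map,
             // and push it through the method under review.
             BlockLocation location = new BlockLocation(
                 new String[] {"127.0.0.1:62870"}, new String[] {"127.0.0.1"},
                 0L, 1L);
             String json = JsonUtil.toJsonString(new BlockLocation[] {location});
             Map<?, ?> jsonMap = (Map<?, ?>) new JSONParser().parse(json);
             BlockLocation[] parsed = JsonUtilClient.toBlockLocationArray(jsonMap);
             assertEquals(1, parsed.length);
             assertEquals(location.toString(), parsed[0].toString());
           }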

##########
File path: hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
##########
@@ -908,4 +911,51 @@ private static SnapshotStatus toSnapshotStatus(
                 SnapshotStatus.getParentPath(fullPath)));
     return snapshotStatus;
   }
+
+  static BlockLocation[] toBlockLocationArray(Map<?, ?> json)
+      throws IOException {
+    final Map<?, ?> rootmap =
+        (Map<?, ?>) json.get(BlockLocation.class.getSimpleName() + "s");
+    final List<?> array =
+        JsonUtilClient.getList(rootmap, BlockLocation.class.getSimpleName());
+    Preconditions.checkNotNull(array);
+    final BlockLocation[] locations = new BlockLocation[array.size()];
+    int i = 0;
+    for (Object object : array) {
+      final Map<?, ?> m = (Map<?, ?>) object;
+      locations[i++] = JsonUtilClient.toBlockLocation(m);
+    }
+    return locations;
+  }
+
+  /** Convert a JSON map to a BlockLocation. */
+  static BlockLocation toBlockLocation(Map<?, ?> m) throws IOException {
+    if (m == null) {
+      return null;
+    }
+    long length = ((Number) m.get("length")).longValue();
+    long offset = ((Number) m.get("offset")).longValue();
+    // parseBoolean parses the string; Boolean.getBoolean would look up a
+    // system property of that name instead.
+    boolean corrupt = Boolean.parseBoolean(m.get("corrupt").toString());
+    String[] storageIds = toStringArray(getList(m, "storageIds"));
+    String[] cachedHosts = toStringArray(getList(m, "cachedHosts"));
+    String[] hosts = toStringArray(getList(m, "hosts"));
+    String[] names = toStringArray(getList(m, "names"));
+    String[] topologyPaths = toStringArray(getList(m, "topologyPaths"));
+    StorageType[] storageTypes =
+        toStorageTypeArray(getList(m, "storageTypes"));
+    return new BlockLocation(names, hosts, cachedHosts, topologyPaths,
+        storageIds, storageTypes, offset, length, corrupt);
+  }
+
+  static String[] toStringArray(List<?> list) {

Review comment:
       Mark this @VisibleForTesting.
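
       i.e., something like this (a sketch; the method body shown simply
       mirrors the toBlockLocationArray loop above, and the exact
       VisibleForTesting import depends on whether this module uses plain
       or shaded thirdparty Guava):

           @VisibleForTesting
           static String[] toStringArray(List<?> list) {
             if (list == null) {
               return null;
             }
             final String[] array = new String[list.size()];
             int i = 0;
             for (Object object : list) {
               array[i++] = object.toString();
             }
             return array;
           }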

##########
File path: hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
##########
@@ -908,4 +911,51 @@ private static SnapshotStatus toSnapshotStatus(
                 SnapshotStatus.getParentPath(fullPath)));
     return snapshotStatus;
   }
+
+  static BlockLocation[] toBlockLocationArray(Map<?, ?> json)
+      throws IOException {
+    final Map<?, ?> rootmap =
+        (Map<?, ?>) json.get(BlockLocation.class.getSimpleName() + "s");
+    final List<?> array =
+        JsonUtilClient.getList(rootmap, BlockLocation.class.getSimpleName());
+    Preconditions.checkNotNull(array);
+    final BlockLocation[] locations = new BlockLocation[array.size()];
+    int i = 0;
+    for (Object object : array) {
+      final Map<?, ?> m = (Map<?, ?>) object;
+      locations[i++] = JsonUtilClient.toBlockLocation(m);
+    }
+    return locations;
+  }
+
+  /** Convert a JSON map to a BlockLocation. */
+  static BlockLocation toBlockLocation(Map<?, ?> m) throws IOException {

Review comment:
       This needs a test as well.
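
       A sketch of a direct test, building the map by hand (the key names
       come from the method body above; "DISK" as the serialized
       StorageType string is an assumption about toStorageTypeArray):

           import java.util.Arrays;
           import java.util.HashMap;
           import java.util.Map;

           import org.apache.hadoop.fs.BlockLocation;
           import org.junit.Test;

           import static org.junit.Assert.assertEquals;
           import static org.junit.Assert.assertFalse;

           @Test
           public void testToBlockLocation() throws Exception {
             // Hand-built map mirroring the WebHDFS JSON schema.
             Map<String, Object> m = new HashMap<>();
             m.put("length", 1024L);
             m.put("offset", 0L);
             m.put("corrupt", false);
             m.put("storageIds", Arrays.asList("storage-1"));
             m.put("cachedHosts", Arrays.asList("host1"));
             m.put("hosts", Arrays.asList("host1"));
             m.put("names", Arrays.asList("host1:9866"));
             m.put("topologyPaths", Arrays.asList("/default-rack/host1:9866"));
             m.put("storageTypes", Arrays.asList("DISK"));
             BlockLocation location = JsonUtilClient.toBlockLocation(m);
             assertEquals(1024L, location.getLength());
             assertEquals(0L, location.getOffset());
             assertFalse(location.isCorrupt());
             assertEquals("host1", location.getHosts()[0]);
           }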

##########
File path: hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
##########
@@ -2002,4 +2003,38 @@ public void testContentType() throws Exception {
         () -> HttpFSUtils.jsonParse(conn));
     conn.disconnect();
   }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testGetFileBlockLocations() throws Exception {
+    createHttpFSServer(false, false);
+    // Create a test directory
+    String pathStr = "/tmp/tmp-snap-diff-test";
+    createDirWithHttp(pathStr, "700", null);
+
+    Path path = new Path(pathStr);
+    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
+        .get(path.toUri(), TestHdfsHelper.getHdfsConf());
+    // Enable snapshot
+    dfs.allowSnapshot(path);
+    Assert.assertTrue(dfs.getFileStatus(path).isSnapshotEnabled());
+    // Create a test file
+    String file1 = pathStr + "/file1";
+    createWithHttp(file1, null);
+    HttpURLConnection conn = sendRequestToHttpFSServer(file1,
+        "GETFILEBLOCKLOCATIONS", "length=10&offset10");
+    Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+    BlockLocation[] locations1 =
+        dfs.getFileBlockLocations(new Path(file1), 0, 1);
+    Assert.assertNotNull(locations1);
+
+    HttpURLConnection conn1 = sendRequestToHttpFSServer(file1,
+        "GET_BLOCK_LOCATIONS", "length=10&offset10");
+    Assert.assertEquals(HttpURLConnection.HTTP_OK, conn1.getResponseCode());
+    BlockLocation[] locations2 =
+        dfs.getFileBlockLocations(new Path(file1), 0, 1);
+    Assert.assertNotNull(locations2);
+  }

Review comment:
       Addressed by testGetFileBlockLocationsFallback().

##########
File path: hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
##########
@@ -1948,4 +1966,95 @@ public void testStoragePolicySatisfier() throws Exception {
       dfs.delete(path1, true);
     }
   }
+
+  private void testGetFileBlockLocations() throws Exception {
+    BlockLocation[] locations1, locations2 = null;

Review comment:
       These variables need better names; they can also be consolidated into one.
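
       For example, one descriptively named variable covers both branches,
       since getFileBlockLocations() is declared on FileSystem itself
       (an illustrative sketch only):

           BlockLocation[] blockLocations = null;
           if (fs instanceof HttpFSFileSystem
               || fs instanceof WebHdfsFileSystem) {
             blockLocations = fs.getFileBlockLocations(testFile, 0, 1);
           }
           assertNotNull(blockLocations);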

##########
File path: hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
##########
@@ -1948,4 +1966,95 @@ public void testStoragePolicySatisfier() throws Exception {
       dfs.delete(path1, true);
     }
   }
+
+  private void testGetFileBlockLocations() throws Exception {
+    BlockLocation[] locations1, locations2 = null;
+    Path testFile = null;
+    if (!this.isLocalFS()) {
+      FileSystem fs = this.getHttpFSFileSystem();
+      testFile = new Path(getProxiedFSTestDir(), "singleBlock.txt");
+      DFSTestUtil.createFile(fs, testFile, (long) 1, (short) 1, 0L);
+      if (fs instanceof HttpFSFileSystem) {
+        HttpFSFileSystem httpFS = (HttpFSFileSystem) fs;
+        locations1 = httpFS.getFileBlockLocations(testFile, 0, 1);
+        assertNotNull(locations1);
+
+        // TODO: add test for HttpFSFileSystem.toBlockLocations()
+        String jsonString = JsonUtil.toJsonString(locations1);
+        JSONParser parser = new JSONParser();
+        JSONObject jsonObject = (JSONObject)parser.parse(
+            jsonString, (ContainerFactory)null);
+        BlockLocation[] deserializedLocation =
+            httpFS.toBlockLocations(jsonObject);
+        assertEquals(locations1.length, deserializedLocation.length);
+        for (int i = 0; i < locations1.length; i++) {
+          assertEquals(locations1[i].toString(),
+              deserializedLocation[i].toString());
+        }
+      } else if (fs instanceof WebHdfsFileSystem) {
+        WebHdfsFileSystem webHdfsFileSystem = (WebHdfsFileSystem) fs;
+        locations2 = webHdfsFileSystem.getFileBlockLocations(testFile, 0, 1);
+        assertNotNull(locations2);
+      } else {
+        Assert
+            .fail(fs.getClass().getSimpleName() + " doesn't support access");
+      }
+    }
+  }
+
+  private void testGetFileBlockLocationsFallback() throws IOException,
+      InterruptedException {
+    Configuration conf = new Configuration(false);
+
+    // Create an HDFS file and fetch its LocatedBlocks. Convert them to a
+    // JSON string, return that to the client, and verify the client
+    // parses the expected locations.
+    Path testFile = new Path(getProxiedFSTestDir(), "singleBlock.txt");
+    DistributedFileSystem distributedFs = (DistributedFileSystem) FileSystem
+        .get(testFile.toUri(), this.getProxiedFSConf());
+    DFSTestUtil.createFile(distributedFs, testFile, (long) 1, (short) 1, 0L);
+
+    MiniDFSCluster cluster = ((TestHdfsHelper) hdfsTestHelper)
+        .getMiniDFSCluster();
+    LocatedBlocks locatedBlocks = DFSClientAdapter.callGetBlockLocations(
+        cluster.getNameNodeRpc(0), testFile.toString(), 0L, 1);
+    String json = JsonUtil.toJsonString(locatedBlocks);
+    LOG.info("json = {}", json);
+
+    try (MockWebServer server = new MockWebServer()) {
+      server.enqueue(new MockResponse().setBody("{\n"
+          + "  \"RemoteException\":\n" + "  {\n"
+          + "    \"exception\"    : \"IllegalArgumentException\",\n"
+          + "    \"javaClassName\": \"java.lang.IllegalArgumentException\",\n"
+          + "    \"message\"      : \"Invalid value for webhdfs parameter 
\\\"GETFILEBLOCKLOCATIONS\\\": ...\"\n"
+          + "  }\n" + "}").setResponseCode(400)); // BAD_REQUEST
+      server.enqueue(new MockResponse().setBody(json));
+      server.start();
+      URI uri = URI.create(String.format("%s://%s:%d", getScheme(),
+          server.getHostName(), server.getPort()));
+
+      FileSystem fs = FileSystem.get(uri, conf);
+
+      // verify client FileSystem handles fallback from GETFILEBLOCKLOCATIONS to
+      // GET_BLOCK_LOCATIONS correctly.
+      BlockLocation[] locations =
+          DFSUtilClient.locatedBlocks2Locations(locatedBlocks);
+      BlockLocation[] locations1 = fs.getFileBlockLocations(testFile, 0, 1);

Review comment:
       These variable names are unclear; pick descriptive ones.
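
       For example (illustrative names only):

           // What the NameNode reports vs. what the client obtained via the
           // mocked HTTP responses.
           BlockLocation[] expectedLocations =
               DFSUtilClient.locatedBlocks2Locations(locatedBlocks);
           BlockLocation[] actualLocations =
               fs.getFileBlockLocations(testFile, 0, 1);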




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: common-issues-unsubscr...@hadoop.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


