[ https://issues.apache.org/jira/browse/HDFS-6874?focusedWorklogId=642108&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-642108 ]
ASF GitHub Bot logged work on HDFS-6874: ---------------------------------------- Author: ASF GitHub Bot Created on: 26/Aug/21 02:28 Start Date: 26/Aug/21 02:28 Worklog Time Spent: 10m Work Description: jojochuang commented on a change in pull request #3322: URL: https://github.com/apache/hadoop/pull/3322#discussion_r696240622 ########## File path: hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java ########## @@ -2002,4 +2003,38 @@ public void testContentType() throws Exception { () -> HttpFSUtils.jsonParse(conn)); conn.disconnect(); } + + @Test + @TestDir + @TestJetty + @TestHdfs + public void testGetFileBlockLocations() throws Exception { + createHttpFSServer(false, false); + // Create a test directory + String pathStr = "/tmp/tmp-snap-diff-test"; + createDirWithHttp(pathStr, "700", null); + + Path path = new Path(pathStr); + DistributedFileSystem dfs = (DistributedFileSystem) FileSystem + .get(path.toUri(), TestHdfsHelper.getHdfsConf()); + // Enable snapshot + dfs.allowSnapshot(path); + Assert.assertTrue(dfs.getFileStatus(path).isSnapshotEnabled()); + // Create a file and take a snapshot + String file1 = pathStr + "/file1"; + createWithHttp(file1, null); + HttpURLConnection conn = sendRequestToHttpFSServer(file1, + "GETFILEBLOCKLOCATIONS", "length=10&offset10"); + Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); + BlockLocation[] locations1 = + dfs.getFileBlockLocations(new Path(file1), 0, 1); + Assert.assertNotNull(locations1); Review comment: it makes no sense to check nullity of locations1 using hdfs. The code doesn't change the file, so why check it? 
########## File path: hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java ########## @@ -2002,4 +2003,38 @@ public void testContentType() throws Exception { () -> HttpFSUtils.jsonParse(conn)); conn.disconnect(); } + + @Test + @TestDir + @TestJetty + @TestHdfs + public void testGetFileBlockLocations() throws Exception { + createHttpFSServer(false, false); + // Create a test directory + String pathStr = "/tmp/tmp-snap-diff-test"; + createDirWithHttp(pathStr, "700", null); + + Path path = new Path(pathStr); + DistributedFileSystem dfs = (DistributedFileSystem) FileSystem + .get(path.toUri(), TestHdfsHelper.getHdfsConf()); + // Enable snapshot + dfs.allowSnapshot(path); + Assert.assertTrue(dfs.getFileStatus(path).isSnapshotEnabled()); + // Create a file and take a snapshot + String file1 = pathStr + "/file1"; + createWithHttp(file1, null); + HttpURLConnection conn = sendRequestToHttpFSServer(file1, + "GETFILEBLOCKLOCATIONS", "length=10&offset10"); + Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); + BlockLocation[] locations1 = + dfs.getFileBlockLocations(new Path(file1), 0, 1); + Assert.assertNotNull(locations1); + + HttpURLConnection conn1 = sendRequestToHttpFSServer(file1, + "GET_BLOCK_LOCATIONS", "length=10&offset10"); + Assert.assertEquals(HttpURLConnection.HTTP_OK, conn1.getResponseCode()); + BlockLocation[] locations2 = + dfs.getFileBlockLocations(new Path(file1), 0, 1); + Assert.assertNotNull(locations2); Review comment: here, too. 
########## File path: hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java ########## @@ -2002,4 +2003,38 @@ public void testContentType() throws Exception { () -> HttpFSUtils.jsonParse(conn)); conn.disconnect(); } + + @Test + @TestDir + @TestJetty + @TestHdfs + public void testGetFileBlockLocations() throws Exception { + createHttpFSServer(false, false); + // Create a test directory + String pathStr = "/tmp/tmp-snap-diff-test"; + createDirWithHttp(pathStr, "700", null); + + Path path = new Path(pathStr); + DistributedFileSystem dfs = (DistributedFileSystem) FileSystem + .get(path.toUri(), TestHdfsHelper.getHdfsConf()); + // Enable snapshot Review comment: no i don't understand this. it doesn't look like snapshot is used at all. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: common-issues-unsubscribe@hadoop.apache.org For queries about this service, please contact Infrastructure at: users@infra.apache.org Issue Time Tracking ------------------- Worklog Id: (was: 642108) Time Spent: 1.5h (was: 1h 20m) > Add GETFILEBLOCKLOCATIONS operation to HttpFS > --------------------------------------------- > > Key: HDFS-6874 > URL: https://issues.apache.org/jira/browse/HDFS-6874 > Project: Hadoop HDFS > Issue Type: Improvement > Components: httpfs > Affects Versions: 2.4.1, 2.7.3 > Reporter: Gao Zhong Liang > Assignee: Weiwei Yang > Priority: Major > Labels: BB2015-05-TBR, pull-request-available > Attachments: HDFS-6874-1.patch, HDFS-6874-branch-2.6.0.patch, > HDFS-6874.011.patch, HDFS-6874.02.patch, HDFS-6874.03.patch, > HDFS-6874.04.patch, HDFS-6874.05.patch, HDFS-6874.06.patch, > HDFS-6874.07.patch, HDFS-6874.08.patch, HDFS-6874.09.patch, > HDFS-6874.10.patch, HDFS-6874.patch > > Time Spent: 1.5h > Remaining Estimate: 0h > > GETFILEBLOCKLOCATIONS operation is missing in HttpFS, 
which is already > supported in WebHDFS. For the request of GETFILEBLOCKLOCATIONS in > org.apache.hadoop.fs.http.server.HttpFSServer, BAD_REQUEST is returned so far: > ....... > case GETFILEBLOCKLOCATIONS: { > response = Response.status(Response.Status.BAD_REQUEST).build(); > break; > } > ........ -- This message was sent by Atlassian Jira (v8.3.4#803005) --------------------------------------------------------------------- To unsubscribe, e-mail: hdfs-issues-unsubscribe@hadoop.apache.org For additional commands, e-mail: hdfs-issues-help@hadoop.apache.org