Author: szetszwo
Date: Wed Oct 26 19:33:44 2011
New Revision: 1189405
URL: http://svn.apache.org/viewvc?rev=1189405&view=rev
Log:
svn merge -c 1170085 from trunk for HDFS-2317.
Added:
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
- copied unchanged from r1170085,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
- copied unchanged from r1170085,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
Modified:
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/
(props changed)
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
(props changed)
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UriFsPathParam.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
Propchange: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Oct 26 19:33:44 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1167662,1170379,1170459,1171297,1172916,1173402,1176550,1176733,1177487,1177531,1177859,1177864
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1167662,1170085,1170379,1170459,1171297,1172916,1173402,1176550,1176733,1177487,1177531,1177859,1177864
/hadoop/core/branches/branch-0.19/hdfs:713112
/hadoop/hdfs/branches/HDFS-1052:987665-1095512
/hadoop/hdfs/branches/HDFS-265:796829-820463
Modified:
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1189405&r1=1189404&r2=1189405&view=diff
==============================================================================
---
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
(original)
+++
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
Wed Oct 26 19:33:44 2011
@@ -311,6 +311,8 @@ Release 0.23.0 - Unreleased
HDFS-2284. Add a new FileSystem, webhdfs://, for supporting write Http
access to HDFS. (szetszwo)
+ HDFS-2317. Support read access to HDFS in webhdfs. (szetszwo)
+
IMPROVEMENTS
HDFS-1875. MiniDFSCluster hard-codes dfs.datanode.address to localhost
Propchange:
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Oct 26 19:33:44 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1167662,1170379,1170459,1171297,1172916,1173402,1176550,1176733,1177487,1177531,1177859,1177864
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1167662,1170085,1170379,1170459,1171297,1172916,1173402,1176550,1176733,1177487,1177531,1177859,1177864
/hadoop/core/branches/branch-0.19/hdfs/src/java:713112
/hadoop/core/trunk/src/hdfs:776175-785643,785929-786278
/hadoop/hdfs/branches/HDFS-1052/src/java:987665-1095512
Modified:
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java?rev=1189405&r1=1189404&r2=1189405&view=diff
==============================================================================
---
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java
(original)
+++
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java
Wed Oct 26 19:33:44 2011
@@ -33,7 +33,7 @@ import org.apache.hadoop.hdfs.server.nam
* is made on the successive read(). The normal input stream functions are
* connected to the currently active input stream.
*/
-class ByteRangeInputStream extends FSInputStream {
+public class ByteRangeInputStream extends FSInputStream {
/**
* This class wraps a URL to allow easy mocking when testing. The URL class
@@ -71,7 +71,8 @@ class ByteRangeInputStream extends FSInp
StreamStatus status = StreamStatus.SEEK;
- ByteRangeInputStream(final URL url) {
+ /** Create an input stream with the URL. */
+ public ByteRangeInputStream(final URL url) {
this(new URLOpener(url), new URLOpener(null));
}
Modified:
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1189405&r1=1189404&r2=1189405&view=diff
==============================================================================
---
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
(original)
+++
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
Wed Oct 26 19:33:44 2011
@@ -229,12 +229,11 @@ public class DistributedFileSystem exten
return dfs.recoverLease(getPathName(f));
}
- @SuppressWarnings("deprecation")
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
statistics.incrementReadOps(1);
return new DFSClient.DFSDataInputStream(
- dfs.open(getPathName(f), bufferSize, verifyChecksum, statistics));
+ dfs.open(getPathName(f), bufferSize, verifyChecksum));
}
/** This optional operation is not yet supported. */
Modified:
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java?rev=1189405&r1=1189404&r2=1189405&view=diff
==============================================================================
---
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
(original)
+++
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
Wed Oct 26 19:33:44 2011
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.da
import java.io.IOException;
import java.io.InputStream;
+import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
@@ -27,6 +28,7 @@ import java.util.EnumSet;
import javax.servlet.ServletContext;
import javax.ws.rs.Consumes;
import javax.ws.rs.DefaultValue;
+import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
@@ -36,6 +38,7 @@ import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
+import javax.ws.rs.core.StreamingOutput;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -43,12 +46,16 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
+import org.apache.hadoop.hdfs.web.resources.GetOpParam;
+import org.apache.hadoop.hdfs.web.resources.LengthParam;
+import org.apache.hadoop.hdfs.web.resources.OffsetParam;
import org.apache.hadoop.hdfs.web.resources.OverwriteParam;
import org.apache.hadoop.hdfs.web.resources.Param;
import org.apache.hadoop.hdfs.web.resources.PermissionParam;
@@ -61,7 +68,7 @@ import org.apache.hadoop.io.IOUtils;
/** Web-hdfs DataNode implementation. */
@Path("")
public class DatanodeWebHdfsMethods {
-  private static final Log LOG = LogFactory.getLog(DatanodeWebHdfsMethods.class);
+  public static final Log LOG = LogFactory.getLog(DatanodeWebHdfsMethods.class);
private @Context ServletContext context;
@@ -166,4 +173,56 @@ public class DatanodeWebHdfsMethods {
throw new UnsupportedOperationException(op + " is not supported");
}
}
+
+ /** Handle HTTP GET request. */
+ @GET
+ @Path("{" + UriFsPathParam.NAME + ":.*}")
+ @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
+ public Response get(
+ @PathParam(UriFsPathParam.NAME) final UriFsPathParam path,
+ @QueryParam(GetOpParam.NAME) @DefaultValue(GetOpParam.DEFAULT)
+ final GetOpParam op,
+ @QueryParam(OffsetParam.NAME) @DefaultValue(OffsetParam.DEFAULT)
+ final OffsetParam offset,
+ @QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT)
+ final LengthParam length,
+ @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
+ final BufferSizeParam bufferSize
+ ) throws IOException, URISyntaxException {
+
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(op + ": " + path
+ + Param.toSortedString(", ", offset, length, bufferSize));
+ }
+
+ final String fullpath = path.getAbsolutePath();
+ final DataNode datanode = (DataNode)context.getAttribute("datanode");
+
+ switch(op.getValue()) {
+ case OPEN:
+ {
+ final Configuration conf = new Configuration(datanode.getConf());
+ final InetSocketAddress nnRpcAddr = NameNode.getAddress(conf);
+ final DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
+ final DFSDataInputStream in = new DFSClient.DFSDataInputStream(
+ dfsclient.open(fullpath, bufferSize.getValue(), true));
+ in.seek(offset.getValue());
+
+ final StreamingOutput streaming = new StreamingOutput() {
+ @Override
+ public void write(final OutputStream out) throws IOException {
+ final Long n = length.getValue();
+ if (n == null) {
+ IOUtils.copyBytes(in, out, bufferSize.getValue());
+ } else {
+ IOUtils.copyBytes(in, out, n, false);
+ }
+ }
+ };
+      return Response.ok(streaming).type(MediaType.APPLICATION_OCTET_STREAM).build();
+ }
+ default:
+ throw new UnsupportedOperationException(op + " is not supported");
+ }
+ }
}
Modified:
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1189405&r1=1189404&r2=1189405&view=diff
==============================================================================
---
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
(original)
+++
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
Wed Oct 26 19:33:44 2011
@@ -17,8 +17,11 @@
*/
package org.apache.hadoop.hdfs.server.namenode.web.resources;
+import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.PrintStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.EnumSet;
@@ -37,11 +40,13 @@ import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
+import javax.ws.rs.core.StreamingOutput;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -58,7 +63,9 @@ import org.apache.hadoop.hdfs.web.resour
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.GroupParam;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
+import org.apache.hadoop.hdfs.web.resources.LengthParam;
import org.apache.hadoop.hdfs.web.resources.ModificationTimeParam;
+import org.apache.hadoop.hdfs.web.resources.OffsetParam;
import org.apache.hadoop.hdfs.web.resources.OverwriteParam;
import org.apache.hadoop.hdfs.web.resources.OwnerParam;
import org.apache.hadoop.hdfs.web.resources.Param;
@@ -79,15 +86,23 @@ public class NamenodeWebHdfsMethods {
private @Context ServletContext context;
private static DatanodeInfo chooseDatanode(final NameNode namenode,
- final String path, final HttpOpParam.Op op) throws IOException {
- if (op == PostOpParam.Op.APPEND) {
- final HdfsFileStatus status = namenode.getRpcServer().getFileInfo(path);
+ final String path, final HttpOpParam.Op op, final long openOffset
+ ) throws IOException {
+ if (op == GetOpParam.Op.OPEN || op == PostOpParam.Op.APPEND) {
+ final NamenodeProtocols np = namenode.getRpcServer();
+ final HdfsFileStatus status = np.getFileInfo(path);
final long len = status.getLen();
+ if (op == GetOpParam.Op.OPEN && (openOffset < 0L || openOffset >= len)) {
+ throw new IOException("Offset=" + openOffset + " out of the range [0, "
+ + len + "); " + op + ", path=" + path);
+ }
+
if (len > 0) {
-      final LocatedBlocks locations = namenode.getRpcServer().getBlockLocations(path, len-1, 1);
+ final long offset = op == GetOpParam.Op.OPEN? openOffset: len - 1;
+ final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
final int count = locations.locatedBlockCount();
if (count > 0) {
- return JspHelper.bestNode(locations.get(count - 1));
+ return JspHelper.bestNode(locations.get(0));
}
}
}
@@ -98,9 +113,9 @@ public class NamenodeWebHdfsMethods {
}
private static URI redirectURI(final NameNode namenode,
- final String path, final HttpOpParam.Op op,
+ final String path, final HttpOpParam.Op op, final long openOffset,
final Param<?, ?>... parameters) throws URISyntaxException, IOException {
- final DatanodeInfo dn = chooseDatanode(namenode, path, op);
+ final DatanodeInfo dn = chooseDatanode(namenode, path, op, openOffset);
final String query = op.toQueryString() + Param.toSortedString("&",
parameters);
final String uripath = "/" + WebHdfsFileSystem.PATH_PREFIX + path;
@@ -148,8 +163,9 @@ public class NamenodeWebHdfsMethods {
if (LOG.isTraceEnabled()) {
LOG.trace(op + ": " + path
- + Param.toSortedString(", ", dstPath, owner, group, permission,
- overwrite, bufferSize, replication, blockSize));
+ + Param.toSortedString(", ", dstPath, owner, group, permission,
+ overwrite, bufferSize, replication, blockSize,
+ modificationTime, accessTime, renameOptions));
}
final String fullpath = path.getAbsolutePath();
@@ -159,7 +175,7 @@ public class NamenodeWebHdfsMethods {
switch(op.getValue()) {
case CREATE:
{
- final URI uri = redirectURI(namenode, fullpath, op.getValue(),
+ final URI uri = redirectURI(namenode, fullpath, op.getValue(), -1L,
permission, overwrite, bufferSize, replication, blockSize);
return Response.temporaryRedirect(uri).build();
}
@@ -234,7 +250,8 @@ public class NamenodeWebHdfsMethods {
switch(op.getValue()) {
case APPEND:
{
-      final URI uri = redirectURI(namenode, fullpath, op.getValue(), bufferSize);
+ final URI uri = redirectURI(namenode, fullpath, op.getValue(), -1L,
+ bufferSize);
return Response.temporaryRedirect(uri).build();
}
default:
@@ -250,9 +267,15 @@ public class NamenodeWebHdfsMethods {
@Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
public Response root(
@QueryParam(GetOpParam.NAME) @DefaultValue(GetOpParam.DEFAULT)
- final GetOpParam op
- ) throws IOException {
- return get(ROOT, op);
+ final GetOpParam op,
+ @QueryParam(OffsetParam.NAME) @DefaultValue(OffsetParam.DEFAULT)
+ final OffsetParam offset,
+ @QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT)
+ final LengthParam length,
+ @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
+ final BufferSizeParam bufferSize
+ ) throws IOException, URISyntaxException {
+ return get(ROOT, op, offset, length, bufferSize);
}
/** Handle HTTP GET request. */
@@ -262,27 +285,89 @@ public class NamenodeWebHdfsMethods {
public Response get(
@PathParam(UriFsPathParam.NAME) final UriFsPathParam path,
@QueryParam(GetOpParam.NAME) @DefaultValue(GetOpParam.DEFAULT)
- final GetOpParam op
- ) throws IOException {
+ final GetOpParam op,
+ @QueryParam(OffsetParam.NAME) @DefaultValue(OffsetParam.DEFAULT)
+ final OffsetParam offset,
+ @QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT)
+ final LengthParam length,
+ @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
+ final BufferSizeParam bufferSize
+ ) throws IOException, URISyntaxException {
if (LOG.isTraceEnabled()) {
LOG.trace(op + ", " + path
- + Param.toSortedString(", "));
+ + Param.toSortedString(", ", offset, length, bufferSize));
}
+ final NameNode namenode = (NameNode)context.getAttribute("name.node");
+ final String fullpath = path.getAbsolutePath();
+ final NamenodeProtocols np = namenode.getRpcServer();
+
switch(op.getValue()) {
+ case OPEN:
+ {
+ final URI uri = redirectURI(namenode, fullpath, op.getValue(),
+ offset.getValue(), offset, length, bufferSize);
+ return Response.temporaryRedirect(uri).build();
+ }
case GETFILESTATUS:
- final NameNode namenode = (NameNode)context.getAttribute("name.node");
- final String fullpath = path.getAbsolutePath();
-      final HdfsFileStatus status = namenode.getRpcServer().getFileInfo(fullpath);
+ {
+ final HdfsFileStatus status = np.getFileInfo(fullpath);
final String js = JsonUtil.toJsonString(status);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
-
+ }
+ case LISTSTATUS:
+ {
+ final StreamingOutput streaming = getListingStream(np, fullpath);
+ return Response.ok(streaming).type(MediaType.APPLICATION_JSON).build();
+ }
default:
throw new UnsupportedOperationException(op + " is not supported");
}
}
+  private static DirectoryListing getDirectoryListing(final NamenodeProtocols np,
+ final String p, byte[] startAfter) throws IOException {
+ final DirectoryListing listing = np.getListing(p, startAfter, false);
+ if (listing == null) { // the directory does not exist
+ throw new FileNotFoundException("File " + p + " does not exist.");
+ }
+ return listing;
+ }
+
+ private static StreamingOutput getListingStream(final NamenodeProtocols np,
+ final String p) throws IOException {
+ final DirectoryListing first = getDirectoryListing(np, p,
+ HdfsFileStatus.EMPTY_NAME);
+
+ return new StreamingOutput() {
+ @Override
+ public void write(final OutputStream outstream) throws IOException {
+ final PrintStream out = new PrintStream(outstream);
+ out.print('[');
+
+ final HdfsFileStatus[] partial = first.getPartialListing();
+ if (partial.length > 0) {
+ out.print(JsonUtil.toJsonString(partial[0]));
+ }
+ for(int i = 1; i < partial.length; i++) {
+ out.println(',');
+ out.print(JsonUtil.toJsonString(partial[i]));
+ }
+
+ for(DirectoryListing curr = first; curr.hasMore(); ) {
+ curr = getDirectoryListing(np, p, curr.getLastName());
+ for(HdfsFileStatus s : curr.getPartialListing()) {
+ out.println(',');
+ out.print(JsonUtil.toJsonString(s));
+ }
+ }
+
+ out.println(']');
+ }
+ };
+ }
+
/** Handle HTTP DELETE request. */
@DELETE
@Path("{path:.*}")
Modified:
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1189405&r1=1189404&r2=1189405&view=diff
==============================================================================
---
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
(original)
+++
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
Wed Oct 26 19:33:44 2011
@@ -30,6 +30,7 @@ import java.net.URL;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
@@ -37,6 +38,7 @@ import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.ByteRangeInputStream;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HftpFileSystem;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -122,12 +124,11 @@ public class WebHdfsFileSystem extends H
}
@SuppressWarnings("unchecked")
- private static Map<String, Object> jsonParse(final InputStream in
- ) throws IOException {
+ private static <T> T jsonParse(final InputStream in) throws IOException {
if (in == null) {
throw new IOException("The input stream is null.");
}
- return (Map<String, Object>)JSON.parse(new InputStreamReader(in));
+ return (T)JSON.parse(new InputStreamReader(in));
}
private static void validateResponse(final HttpOpParam.Op op,
@@ -138,7 +139,7 @@ public class WebHdfsFileSystem extends H
try {
m = jsonParse(conn.getErrorStream());
} catch(IOException e) {
-      throw new IOException("Unexpected HTTP response: code = " + code + " != "
+ throw new IOException("Unexpected HTTP response: code=" + code + " != "
+ op.getExpectedHttpResponseCode() + ", " + op.toQueryString()
+ ", message=" + conn.getResponseMessage(), e);
}
@@ -155,22 +156,26 @@ public class WebHdfsFileSystem extends H
}
}
- @Override
- protected HttpURLConnection openConnection(String path, String query)
- throws IOException {
- query = addDelegationTokenParam(query);
+ private URL toUrl(final HttpOpParam.Op op, final Path fspath,
+ final Param<?,?>... parameters) throws IOException {
+ //initialize URI path and query
+ final String path = "/" + PATH_PREFIX
+ + makeQualified(fspath).toUri().getPath();
+ final String query = op.toQueryString()
+ + Param.toSortedString("&", parameters);
final URL url = getNamenodeURL(path, query);
- return (HttpURLConnection)url.openConnection();
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("url=" + url);
+ }
+ return url;
}
  private HttpURLConnection httpConnect(final HttpOpParam.Op op, final Path fspath,
final Param<?,?>... parameters) throws IOException {
- //initialize URI path and query
-    final String uripath = "/" + PATH_PREFIX + makeQualified(fspath).toUri().getPath();
-    final String query = op.toQueryString() + Param.toSortedString("&", parameters);
+ final URL url = toUrl(op, fspath, parameters);
//connect and get response
- final HttpURLConnection conn = openConnection(uripath, query);
+ final HttpURLConnection conn = (HttpURLConnection)url.openConnection();
try {
conn.setRequestMethod(op.getType().toString());
conn.setDoOutput(op.getDoOutput());
@@ -186,7 +191,17 @@ public class WebHdfsFileSystem extends H
}
}
- private Map<String, Object> run(final HttpOpParam.Op op, final Path fspath,
+ /**
+ * Run a http operation.
+ * Connect to the http server, validate response, and obtain the JSON output.
+ *
+ * @param op http operation
+ * @param fspath file system path
+ * @param parameters parameters for the operation
+ * @return a JSON object, e.g. Object[], Map<String, Object>, etc.
+ * @throws IOException
+ */
+ private <T> T run(final HttpOpParam.Op op, final Path fspath,
final Param<?,?>... parameters) throws IOException {
final HttpURLConnection conn = httpConnect(op, fspath, parameters);
validateResponse(op, conn);
@@ -342,4 +357,30 @@ public class WebHdfsFileSystem extends H
final Map<String, Object> json = run(op, f, new RecursiveParam(recursive));
return (Boolean)json.get(op.toString());
}
+
+ @Override
+ public FSDataInputStream open(final Path f, final int buffersize
+ ) throws IOException {
+ statistics.incrementReadOps(1);
+ final HttpOpParam.Op op = GetOpParam.Op.OPEN;
+ final URL url = toUrl(op, f, new BufferSizeParam(buffersize));
+ return new FSDataInputStream(new ByteRangeInputStream(url));
+ }
+
+ @Override
+ public FileStatus[] listStatus(final Path f) throws IOException {
+ statistics.incrementReadOps(1);
+
+ final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
+ final Object[] array = run(op, f);
+
+ //convert FileStatus
+ final FileStatus[] statuses = new FileStatus[array.length];
+ for(int i = 0; i < array.length; i++) {
+ @SuppressWarnings("unchecked")
+ final Map<String, Object> m = (Map<String, Object>)array[i];
+ statuses[i] = makeQualified(JsonUtil.toFileStatus(m), f);
+ }
+ return statuses;
+ }
}
\ No newline at end of file
Modified:
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java?rev=1189405&r1=1189404&r2=1189405&view=diff
==============================================================================
---
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
(original)
+++
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
Wed Oct 26 19:33:44 2011
@@ -26,7 +26,11 @@ public class GetOpParam extends HttpOpPa
/** Get operations. */
public static enum Op implements HttpOpParam.Op {
+ OPEN(HttpURLConnection.HTTP_OK),
+
GETFILESTATUS(HttpURLConnection.HTTP_OK),
+ LISTSTATUS(HttpURLConnection.HTTP_OK),
+
NULL(HttpURLConnection.HTTP_NOT_IMPLEMENTED);
final int expectedHttpResponseCode;
Modified:
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UriFsPathParam.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UriFsPathParam.java?rev=1189405&r1=1189404&r2=1189405&view=diff
==============================================================================
---
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UriFsPathParam.java
(original)
+++
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UriFsPathParam.java
Wed Oct 26 19:33:44 2011
@@ -39,7 +39,7 @@ public class UriFsPathParam extends Stri
/** @return the absolute path. */
public final String getAbsolutePath() {
- final String path = getValue();
+ final String path = getValue(); //The first / has been stripped out.
return path == null? null: "/" + path;
}
}
Modified:
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1189405&r1=1189404&r2=1189405&view=diff
==============================================================================
---
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
(original)
+++
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
Wed Oct 26 19:33:44 2011
@@ -63,7 +63,6 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.NetUtils;
@@ -1466,18 +1465,6 @@ public class MiniDFSCluster {
}
/**
- * @return a {@link WebHdfsFileSystem} object.
- */
- public WebHdfsFileSystem getWebHdfsFileSystem() throws IOException {
-    final String str = WebHdfsFileSystem.SCHEME + "://" + conf.get("dfs.http.address");
- try {
- return (WebHdfsFileSystem)FileSystem.get(new URI(str), conf);
- } catch (URISyntaxException e) {
- throw new IOException(e);
- }
- }
-
- /**
* @return a {@link HftpFileSystem} object as specified user.
*/
public HftpFileSystem getHftpFileSystemAs(final String username,
Modified:
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java?rev=1189405&r1=1189404&r2=1189405&view=diff
==============================================================================
---
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
(original)
+++
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
Wed Oct 26 19:33:44 2011
@@ -17,80 +17,63 @@
*/
package org.apache.hadoop.hdfs.web;
-
-import static org.apache.hadoop.fs.FileSystemTestHelper.exists;
-import static org.apache.hadoop.fs.FileSystemTestHelper.getDefaultBlockSize;
-import static org.apache.hadoop.fs.FileSystemTestHelper.getTestRootPath;
-
-import java.io.IOException;
+import java.net.URI;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FSMainOperationsBaseTest;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import
org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods;
import org.apache.hadoop.hdfs.web.resources.ExceptionHandler;
import org.apache.log4j.Level;
-import org.junit.Assert;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
import org.junit.Test;
public class TestFSMainOperationsWebHdfs extends FSMainOperationsBaseTest {
{
((Log4JLogger)ExceptionHandler.LOG).getLogger().setLevel(Level.ALL);
+ ((Log4JLogger)DatanodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
}
- private static final MiniDFSCluster cluster;
- private static final Path defaultWorkingDirectory;
+ private static MiniDFSCluster cluster = null;
+ private static Path defaultWorkingDirectory;
- static {
+ @BeforeClass
+ public static void setupCluster() {
Configuration conf = new Configuration();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
- fSys = cluster.getWebHdfsFileSystem();
+ cluster.waitActive();
+
+ final String uri = WebHdfsFileSystem.SCHEME + "://"
+ + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+ fSys = FileSystem.get(new URI(uri), conf);
defaultWorkingDirectory = fSys.getWorkingDirectory();
- } catch (IOException e) {
+ } catch (Exception e) {
throw new RuntimeException(e);
}
}
- @Override
- protected Path getDefaultWorkingDirectory() {
- return defaultWorkingDirectory;
+ @AfterClass
+ public static void shutdownCluster() {
+ if (cluster != null) {
+ cluster.shutdown();
+ cluster = null;
+ }
}
- /** Override the following method without using position read. */
@Override
- protected void writeReadAndDelete(int len) throws IOException {
- Path path = getTestRootPath(fSys, "test/hadoop/file");
- fSys.mkdirs(path.getParent());
-
- FSDataOutputStream out =
- fSys.create(path, false, 4096, (short) 1, getDefaultBlockSize() );
- out.write(data, 0, len);
- out.close();
-
- Assert.assertTrue("Exists", exists(fSys, path));
- Assert.assertEquals("Length", len, fSys.getFileStatus(path).getLen());
-
- FSDataInputStream in = fSys.open(path);
- for (int i = 0; i < len; i++) {
- final int b = in.read();
- Assert.assertEquals("Position " + i, data[i], b);
- }
- in.close();
- Assert.assertTrue("Deleted", fSys.delete(path, false));
- Assert.assertFalse("No longer exists", exists(fSys, path));
+ protected Path getDefaultWorkingDirectory() {
+ return defaultWorkingDirectory;
}
-
- //The following tests failed for HftpFileSystem,
- //Disable it for WebHdfsFileSystem
- @Test
- public void testListStatusThrowsExceptionForNonExistentFile() {}
+ //The following test failed since WebHdfsFileSystem did not support
+ //authentication.
+ //Disable it.
@Test
public void testListStatusThrowsExceptionForUnreadableDir() {}
- @Test
- public void testGlobStatusThrowsExceptionForNonExistentFile() {}
}
\ No newline at end of file
Modified:
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java?rev=1189405&r1=1189404&r2=1189405&view=diff
==============================================================================
---
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
(original)
+++
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
Wed Oct 26 19:33:44 2011
@@ -19,23 +19,23 @@
package org.apache.hadoop.hdfs.web;
import java.io.IOException;
+import java.net.URI;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
-import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.security.UserGroupInformation;
public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
+ private static final Configuration conf = new Configuration();
private static final MiniDFSCluster cluster;
private String defaultWorkingDirectory;
static {
- Configuration conf = new Configuration();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+ cluster.waitActive();
} catch (IOException e) {
throw new RuntimeException(e);
}
@@ -43,44 +43,14 @@ public class TestWebHdfsFileSystemContra
@Override
protected void setUp() throws Exception {
- fs = cluster.getWebHdfsFileSystem();
- defaultWorkingDirectory = "/user/"
- + UserGroupInformation.getCurrentUser().getShortUserName();
+ final String uri = WebHdfsFileSystem.SCHEME + "://"
+ + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+ fs = FileSystem.get(new URI(uri), conf);
+ defaultWorkingDirectory = fs.getWorkingDirectory().toUri().getPath();
}
@Override
protected String getDefaultWorkingDirectory() {
return defaultWorkingDirectory;
}
-
- /** Override the following method without using position read. */
- @Override
- protected void writeReadAndDelete(int len) throws IOException {
- Path path = path("/test/hadoop/file");
-
- fs.mkdirs(path.getParent());
-
- FSDataOutputStream out = fs.create(path, false,
- fs.getConf().getInt("io.file.buffer.size", 4096),
- (short) 1, getBlockSize());
- out.write(data, 0, len);
- out.close();
-
- assertTrue("Exists", fs.exists(path));
- assertEquals("Length", len, fs.getFileStatus(path).getLen());
-
- FSDataInputStream in = fs.open(path);
- for (int i = 0; i < len; i++) {
- final int b = in.read();
- assertEquals("Position " + i, data[i], b);
- }
- in.close();
-
- assertTrue("Deleted", fs.delete(path, false));
- assertFalse("No longer exists", fs.exists(path));
- }
-
- //The following test failed for HftpFileSystem,
- //Disable it for WebHdfsFileSystem
- public void testListStatusThrowsExceptionForNonExistentFile() {}
}