Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1179484&r1=1179483&r2=1179484&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Thu Oct 6 01:16:48 2011
@@ -547,7 +547,7 @@ class NameNodeRpcServer implements Namen
   }
 
   @Override // ClientProtocol
-  public void rename(String src, String dst, Options.Rename... options)
+  public void rename2(String src, String dst, Options.Rename... options)
      throws IOException {
    nn.checkOperation(OperationCategory.WRITE);
    if(stateChangeLog.isDebugEnabled()) {
@@ -561,13 +561,6 @@ class NameNodeRpcServer implements Namen
    }
    metrics.incrFilesRenamed();
  }
-  @Deprecated
-  @Override // ClientProtocol
-  public boolean delete(String src) throws IOException {
-    nn.checkOperation(OperationCategory.WRITE);
-    return delete(src, true);
-  }
-
  @Override // ClientProtocol
  public boolean delete(String src, boolean recursive) throws IOException {
    nn.checkOperation(OperationCategory.WRITE);
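The rename2 change above keeps the two rename contracts on ClientProtocol distinct: the deprecated rename reports failure through a boolean return value, while the Options.Rename overload is void and reports failure by throwing. A minimal caller-side sketch of the difference — illustrative only, not part of this commit; it assumes a reachable default filesystem and hypothetical /tmp paths:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Options;
    import org.apache.hadoop.fs.Path;

    public class RenameContracts {
      public static void main(String[] args) throws IOException {
        final Configuration conf = new Configuration();

        // Old contract: failure is reported as a boolean return value.
        final FileSystem fs = FileSystem.get(conf);
        final boolean done = fs.rename(new Path("/tmp/src"), new Path("/tmp/dst"));
        System.out.println("rename returned " + done);

        // Options.Rename contract (served by rename2 on the RPC side): void
        // return; failure surfaces as an IOException, and OVERWRITE replaces
        // an existing destination.
        final FileContext fc = FileContext.getFileContext(conf);
        fc.rename(new Path("/tmp/dst"), new Path("/tmp/dst2"), Options.Rename.OVERWRITE);
      }
    }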
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1179484&r1=1179483&r2=1179484&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java Thu Oct 6 01:16:48 2011
@@ -46,6 +46,7 @@ import javax.ws.rs.core.StreamingOutput;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -112,7 +113,9 @@ public class NamenodeWebHdfsMethods {
  private static DatanodeInfo chooseDatanode(final NameNode namenode,
      final String path, final HttpOpParam.Op op, final long openOffset
      ) throws IOException {
-    if (op == GetOpParam.Op.OPEN || op == PostOpParam.Op.APPEND) {
+    if (op == GetOpParam.Op.OPEN
+        || op == GetOpParam.Op.GETFILECHECKSUM
+        || op == PostOpParam.Op.APPEND) {
      final NamenodeProtocols np = namenode.getRpcServer();
      final HdfsFileStatus status = np.getFileInfo(path);
      final long len = status.getLen();
@@ -245,7 +248,7 @@ public class NamenodeWebHdfsMethods {
    case MKDIRS:
    {
      final boolean b = np.mkdirs(fullpath, permission.getFsPermission(), true);
-      final String js = JsonUtil.toJsonString(PutOpParam.Op.MKDIRS, b);
+      final String js = JsonUtil.toJsonString("boolean", b);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case RENAME:
@@ -254,10 +257,10 @@ public class NamenodeWebHdfsMethods {
      if (s.isEmpty()) {
        @SuppressWarnings("deprecation")
        final boolean b = np.rename(fullpath, dstPath.getValue());
-        final String js = JsonUtil.toJsonString(PutOpParam.Op.RENAME, b);
+        final String js = JsonUtil.toJsonString("boolean", b);
        return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
      } else {
-        np.rename(fullpath, dstPath.getValue(),
+        np.rename2(fullpath, dstPath.getValue(),
            s.toArray(new Options.Rename[s.size()]));
        return Response.ok().type(MediaType.APPLICATION_JSON).build();
      }
@@ -265,7 +268,7 @@ public class NamenodeWebHdfsMethods {
    case SETREPLICATION:
    {
      final boolean b = np.setReplication(fullpath, replication.getValue());
-      final String js = JsonUtil.toJsonString(PutOpParam.Op.SETREPLICATION, b);
+      final String js = JsonUtil.toJsonString("boolean", b);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case SETOWNER:
@@ -431,6 +434,18 @@ public class NamenodeWebHdfsMethods {
      final StreamingOutput streaming = getListingStream(np, fullpath);
      return Response.ok(streaming).type(MediaType.APPLICATION_JSON).build();
    }
+    case GETCONTENTSUMMARY:
+    {
+      final ContentSummary contentsummary = np.getContentSummary(fullpath);
+      final String js = JsonUtil.toJsonString(contentsummary);
+      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+    }
+    case GETFILECHECKSUM:
+    {
+      final URI uri = redirectURI(namenode, ugi, delegation, fullpath,
+          op.getValue(), -1L);
+      return Response.temporaryRedirect(uri).build();
+    }
    case GETDELEGATIONTOKEN:
    {
      final Token<? extends TokenIdentifier> token = generateDelegationToken(
@@ -467,7 +482,7 @@ public class NamenodeWebHdfsMethods {
      @Override
      public void write(final OutputStream outstream) throws IOException {
        final PrintStream out = new PrintStream(outstream);
-        out.print('[');
+        out.println("{\"" + HdfsFileStatus[].class.getSimpleName() + "\":[");
 
        final HdfsFileStatus[] partial = first.getPartialListing();
        if (partial.length > 0) {
@@ -486,7 +501,7 @@ public class NamenodeWebHdfsMethods {
          }
        }
 
-        out.println(']');
+        out.println("]}");
      }
    };
  }
@@ -522,7 +537,7 @@ public class NamenodeWebHdfsMethods {
    case DELETE:
    {
      final boolean b = namenode.getRpcServer().delete(fullpath, recursive.getValue());
-      final String js = JsonUtil.toJsonString(DeleteOpParam.Op.DELETE, b);
+      final String js = JsonUtil.toJsonString("boolean", b);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    default:

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1179484&r1=1179483&r2=1179484&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Thu Oct 6 01:16:48 2011
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.web;
 
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -24,6 +26,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -34,45 +38,33 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.StringUtils;
 import org.mortbay.util.ajax.JSON;
 
 /** JSON Utilities */
 public class JsonUtil {
-  private static class ThreadLocalMap extends ThreadLocal<Map<String, Object>> {
-    @Override
-    protected Map<String, Object> initialValue() {
-      return new TreeMap<String, Object>();
-    }
-
-    @Override
-    public Map<String, Object> get() {
-      final Map<String, Object> m = super.get();
-      m.clear();
-      return m;
-    }
-  }
-
-  private static final ThreadLocalMap jsonMap = new ThreadLocalMap();
-  private static final ThreadLocalMap tokenMap = new ThreadLocalMap();
-  private static final ThreadLocalMap datanodeInfoMap = new ThreadLocalMap();
-  private static final ThreadLocalMap extendedBlockMap = new ThreadLocalMap();
-  private static final ThreadLocalMap locatedBlockMap = new ThreadLocalMap();
-
+  private static final Object[] EMPTY_OBJECT_ARRAY = {};
+  private static final DatanodeInfo[] EMPTY_DATANODE_INFO_ARRAY = {};
+
  /** Convert a token object to a Json string. */
  public static String toJsonString(final Token<? extends TokenIdentifier> token
      ) throws IOException {
+    return toJsonString(Token.class, toJsonMap(token));
+  }
+
+  private static Map<String, Object> toJsonMap(
+      final Token<? extends TokenIdentifier> token) throws IOException {
    if (token == null) {
      return null;
    }
 
-    final Map<String, Object> m = tokenMap.get();
+    final Map<String, Object> m = new TreeMap<String, Object>();
    m.put("urlString", token.encodeToUrlString());
-    return JSON.toString(m);
+    return m;
  }
 
  /** Convert a Json map to a Token. */
@@ -91,46 +83,52 @@ public class JsonUtil {
  /** Convert a Json map to a Token of DelegationTokenIdentifier. */
  @SuppressWarnings("unchecked")
  public static Token<DelegationTokenIdentifier> toDelegationToken(
-      final Map<?, ?> m) throws IOException {
+      final Map<?, ?> json) throws IOException {
+    final Map<?, ?> m = (Map<?, ?>)json.get(Token.class.getSimpleName());
    return (Token<DelegationTokenIdentifier>)toToken(m);
  }
 
  /** Convert a Json map to a Token of BlockTokenIdentifier. */
  @SuppressWarnings("unchecked")
-  public static Token<BlockTokenIdentifier> toBlockToken(
+  private static Token<BlockTokenIdentifier> toBlockToken(
      final Map<?, ?> m) throws IOException {
    return (Token<BlockTokenIdentifier>)toToken(m);
  }
 
  /** Convert an exception object to a Json string. */
  public static String toJsonString(final Exception e) {
-    final Map<String, Object> m = jsonMap.get();
+    final Map<String, Object> m = new TreeMap<String, Object>();
    m.put("className", e.getClass().getName());
    m.put("message", e.getMessage());
-    return JSON.toString(m);
+    return toJsonString(RemoteException.class, m);
  }
 
  /** Convert a Json map to a RemoteException. */
-  public static RemoteException toRemoteException(final Map<String, Object> m) {
+  public static RemoteException toRemoteException(final Map<?, ?> json) {
+    final Map<?, ?> m = (Map<?, ?>)json.get(RemoteException.class.getSimpleName());
    final String className = (String)m.get("className");
    final String message = (String)m.get("message");
    return new RemoteException(className, message);
  }
 
+  private static String toJsonString(final Class<?> clazz, final Object value) {
+    return toJsonString(clazz.getSimpleName(), value);
+  }
+
  /** Convert a key-value pair to a Json string. */
-  public static String toJsonString(final Object key, final Object value) {
-    final Map<String, Object> m = jsonMap.get();
-    m.put(key instanceof String ? (String) key : key.toString(), value);
+  public static String toJsonString(final String key, final Object value) {
+    final Map<String, Object> m = new TreeMap<String, Object>();
+    m.put(key, value);
    return JSON.toString(m);
  }
 
  /** Convert a FsPermission object to a string. */
-  public static String toString(final FsPermission permission) {
+  private static String toString(final FsPermission permission) {
    return String.format("%o", permission.toShort());
  }
 
  /** Convert a string to a FsPermission object. */
-  public static FsPermission toFsPermission(final String s) {
+  private static FsPermission toFsPermission(final String s) {
    return new FsPermission(Short.parseShort(s, 8));
  }
 
@@ -139,7 +137,7 @@ public class JsonUtil {
    if (status == null) {
      return null;
    } else {
-      final Map<String, Object> m = jsonMap.get();
+      final Map<String, Object> m = new TreeMap<String, Object>();
      m.put("localName", status.getLocalName());
      m.put("isDir", status.isDir());
      m.put("isSymlink", status.isSymlink());
@@ -155,21 +153,17 @@
      m.put("modificationTime", status.getModificationTime());
      m.put("blockSize", status.getBlockSize());
      m.put("replication", status.getReplication());
-      return JSON.toString(m);
+      return toJsonString(HdfsFileStatus.class, m);
    }
  }
 
-  @SuppressWarnings("unchecked")
-  static Map<String, Object> parse(String jsonString) {
-    return (Map<String, Object>) JSON.parse(jsonString);
-  }
-
  /** Convert a Json map to a HdfsFileStatus object. */
-  public static HdfsFileStatus toFileStatus(final Map<String, Object> m) {
-    if (m == null) {
+  public static HdfsFileStatus toFileStatus(final Map<?, ?> json) {
+    if (json == null) {
      return null;
    }
+
+    final Map<?, ?> m = (Map<?, ?>)json.get(HdfsFileStatus.class.getSimpleName());
    final String localName = (String) m.get("localName");
    final boolean isDir = (Boolean) m.get("isDir");
    final boolean isSymlink = (Boolean) m.get("isSymlink");
@@ -189,22 +183,22 @@
        symlink, DFSUtil.string2Bytes(localName));
  }
 
-  /** Convert a LocatedBlock to a Json string. */
-  public static String toJsonString(final ExtendedBlock extendedblock) {
+  /** Convert an ExtendedBlock to a Json map. */
+  private static Map<String, Object> toJsonMap(final ExtendedBlock extendedblock) {
    if (extendedblock == null) {
      return null;
    }
 
-    final Map<String, Object> m = extendedBlockMap.get();
+    final Map<String, Object> m = new TreeMap<String, Object>();
    m.put("blockPoolId", extendedblock.getBlockPoolId());
    m.put("blockId", extendedblock.getBlockId());
    m.put("numBytes", extendedblock.getNumBytes());
    m.put("generationStamp", extendedblock.getGenerationStamp());
-    return JSON.toString(m);
+    return m;
  }
 
  /** Convert a Json map to an ExtendedBlock object. */
-  public static ExtendedBlock toExtendedBlock(final Map<?, ?> m) {
+  private static ExtendedBlock toExtendedBlock(final Map<?, ?> m) {
    if (m == null) {
      return null;
    }
@@ -216,13 +210,13 @@
    return new ExtendedBlock(blockPoolId, blockId, numBytes, generationStamp);
  }
 
-  /** Convert a DatanodeInfo to a Json string. */
-  public static String toJsonString(final DatanodeInfo datanodeinfo) {
+  /** Convert a DatanodeInfo to a Json map. */
+  private static Map<String, Object> toJsonMap(final DatanodeInfo datanodeinfo) {
    if (datanodeinfo == null) {
      return null;
    }
 
-    final Map<String, Object> m = datanodeInfoMap.get();
+    final Map<String, Object> m = new TreeMap<String, Object>();
    m.put("name", datanodeinfo.getName());
    m.put("storageID", datanodeinfo.getStorageID());
    m.put("infoPort", datanodeinfo.getInfoPort());
@@ -238,11 +232,11 @@
    m.put("networkLocation", datanodeinfo.getNetworkLocation());
    m.put("hostName", datanodeinfo.getHostName());
    m.put("adminState", datanodeinfo.getAdminState().name());
-    return JSON.toString(m);
+    return m;
  }
 
  /** Convert a Json map to an DatanodeInfo object. */
-  public static DatanodeInfo toDatanodeInfo(final Map<?, ?> m) {
+  private static DatanodeInfo toDatanodeInfo(final Map<?, ?> m) {
    if (m == null) {
      return null;
    }
@@ -264,25 +258,23 @@
        AdminStates.valueOf((String)m.get("adminState")));
  }
 
-  /** Convert a DatanodeInfo[] to a Json string. */
-  public static String toJsonString(final DatanodeInfo[] array
-      ) throws IOException {
+  /** Convert a DatanodeInfo[] to a Json array. */
+  private static Object[] toJsonArray(final DatanodeInfo[] array) {
    if (array == null) {
      return null;
    } else if (array.length == 0) {
-      return "[]";
+      return EMPTY_OBJECT_ARRAY;
    } else {
-      final StringBuilder b = new StringBuilder().append('[').append(
-          toJsonString(array[0]));
-      for(int i = 1; i < array.length; i++) {
-        b.append(", ").append(toJsonString(array[i]));
+      final Object[] a = new Object[array.length];
+      for(int i = 0; i < array.length; i++) {
+        a[i] = toJsonMap(array[i]);
      }
-      return b.append(']').toString();
+      return a;
    }
  }
 
  /** Convert an Object[] to a DatanodeInfo[]. */
-  public static DatanodeInfo[] toDatanodeInfoArray(final Object[] objects) {
+  private static DatanodeInfo[] toDatanodeInfoArray(final Object[] objects) {
    if (objects == null) {
      return null;
    } else if (objects.length == 0) {
@@ -290,66 +282,63 @@
    } else {
      final DatanodeInfo[] array = new DatanodeInfo[objects.length];
      for(int i = 0; i < array.length; i++) {
-        array[i] = (DatanodeInfo)toDatanodeInfo((Map<?, ?>) objects[i]);
+        array[i] = toDatanodeInfo((Map<?, ?>) objects[i]);
      }
      return array;
    }
  }
 
-  /** Convert a LocatedBlock to a Json string. */
-  public static String toJsonString(final LocatedBlock locatedblock
+  /** Convert a LocatedBlock to a Json map. */
+  private static Map<String, Object> toJsonMap(final LocatedBlock locatedblock
      ) throws IOException {
    if (locatedblock == null) {
      return null;
    }
 
-    final Map<String, Object> m = locatedBlockMap.get();
-    m.put("blockToken", toJsonString(locatedblock.getBlockToken()));
+    final Map<String, Object> m = new TreeMap<String, Object>();
+    m.put("blockToken", toJsonMap(locatedblock.getBlockToken()));
    m.put("isCorrupt", locatedblock.isCorrupt());
    m.put("startOffset", locatedblock.getStartOffset());
-    m.put("block", toJsonString(locatedblock.getBlock()));
-
-    m.put("locations", toJsonString(locatedblock.getLocations()));
-    return JSON.toString(m);
+    m.put("block", toJsonMap(locatedblock.getBlock()));
+    m.put("locations", toJsonArray(locatedblock.getLocations()));
+    return m;
  }
 
  /** Convert a Json map to LocatedBlock. */
-  public static LocatedBlock toLocatedBlock(final Map<?, ?> m) throws IOException {
+  private static LocatedBlock toLocatedBlock(final Map<?, ?> m) throws IOException {
    if (m == null) {
      return null;
    }
 
-    final ExtendedBlock b = toExtendedBlock((Map<?, ?>)JSON.parse((String)m.get("block")));
+    final ExtendedBlock b = toExtendedBlock((Map<?, ?>)m.get("block"));
    final DatanodeInfo[] locations = toDatanodeInfoArray(
-        (Object[])JSON.parse((String)m.get("locations")));
+        (Object[])m.get("locations"));
    final long startOffset = (Long)m.get("startOffset");
    final boolean isCorrupt = (Boolean)m.get("isCorrupt");
    final LocatedBlock locatedblock = new LocatedBlock(b, locations, startOffset, isCorrupt);
-    locatedblock.setBlockToken(toBlockToken((Map<?, ?>)JSON.parse((String)m.get("blockToken"))));
+    locatedblock.setBlockToken(toBlockToken((Map<?, ?>)m.get("blockToken")));
    return locatedblock;
  }
 
-  /** Convert a LocatedBlock[] to a Json string. */
-  public static String toJsonString(final List<LocatedBlock> array
+  /** Convert a LocatedBlock[] to a Json array. */
+  private static Object[] toJsonArray(final List<LocatedBlock> array
      ) throws IOException {
    if (array == null) {
      return null;
    } else if (array.size() == 0) {
-      return "[]";
+      return EMPTY_OBJECT_ARRAY;
    } else {
-      final StringBuilder b = new StringBuilder().append('[').append(
-          toJsonString(array.get(0)));
-      for(int i = 1; i < array.size(); i++) {
-        b.append(",\n  ").append(toJsonString(array.get(i)));
+      final Object[] a = new Object[array.size()];
+      for(int i = 0; i < array.size(); i++) {
+        a[i] = toJsonMap(array.get(i));
      }
-      return b.append(']').toString();
+      return a;
    }
  }
 
-  /** Convert an Object[] to a List of LocatedBlock.
-   * @throws IOException */
-  public static List<LocatedBlock> toLocatedBlockList(final Object[] objects
+  /** Convert an Object[] to a List of LocatedBlock. */
+  private static List<LocatedBlock> toLocatedBlockList(final Object[] objects
      ) throws IOException {
    if (objects == null) {
      return null;
@@ -358,7 +347,7 @@
    } else {
      final List<LocatedBlock> list = new ArrayList<LocatedBlock>(objects.length);
      for(int i = 0; i < objects.length; i++) {
-        list.add((LocatedBlock)toLocatedBlock((Map<?, ?>)objects[i]));
+        list.add(toLocatedBlock((Map<?, ?>)objects[i]));
      }
      return list;
    }
  }
@@ -371,31 +360,115 @@
      return null;
    }
 
-    final Map<String, Object> m = jsonMap.get();
+    final Map<String, Object> m = new TreeMap<String, Object>();
    m.put("fileLength", locatedblocks.getFileLength());
    m.put("isUnderConstruction", locatedblocks.isUnderConstruction());
-    m.put("locatedBlocks", toJsonString(locatedblocks.getLocatedBlocks()));
-    m.put("lastLocatedBlock", toJsonString(locatedblocks.getLastLocatedBlock()));
+    m.put("locatedBlocks", toJsonArray(locatedblocks.getLocatedBlocks()));
+    m.put("lastLocatedBlock", toJsonMap(locatedblocks.getLastLocatedBlock()));
    m.put("isLastBlockComplete", locatedblocks.isLastBlockComplete());
-    return JSON.toString(m);
+    return toJsonString(LocatedBlocks.class, m);
  }
 
  /** Convert a Json map to LocatedBlock. */
-  public static LocatedBlocks toLocatedBlocks(final Map<String, Object> m
+  public static LocatedBlocks toLocatedBlocks(final Map<?, ?> json
      ) throws IOException {
-    if (m == null) {
+    if (json == null) {
      return null;
    }
-
+
+    final Map<?, ?> m = (Map<?, ?>)json.get(LocatedBlocks.class.getSimpleName());
    final long fileLength = (Long)m.get("fileLength");
    final boolean isUnderConstruction = (Boolean)m.get("isUnderConstruction");
    final List<LocatedBlock> locatedBlocks = toLocatedBlockList(
-        (Object[])JSON.parse((String) m.get("locatedBlocks")));
+        (Object[])m.get("locatedBlocks"));
    final LocatedBlock lastLocatedBlock = toLocatedBlock(
-        (Map<?, ?>)JSON.parse((String)m.get("lastLocatedBlock")));
+        (Map<?, ?>)m.get("lastLocatedBlock"));
    final boolean isLastBlockComplete = (Boolean)m.get("isLastBlockComplete");
    return new LocatedBlocks(fileLength, isUnderConstruction, locatedBlocks,
        lastLocatedBlock, isLastBlockComplete);
  }
+
+  /** Convert a ContentSummary to a Json string. */
+  public static String toJsonString(final ContentSummary contentsummary) {
+    if (contentsummary == null) {
+      return null;
+    }
+
+    final Map<String, Object> m = new TreeMap<String, Object>();
+    m.put("length", contentsummary.getLength());
+    m.put("fileCount", contentsummary.getFileCount());
+    m.put("directoryCount", contentsummary.getDirectoryCount());
+    m.put("quota", contentsummary.getQuota());
+    m.put("spaceConsumed", contentsummary.getSpaceConsumed());
+    m.put("spaceQuota", contentsummary.getSpaceQuota());
+    return toJsonString(ContentSummary.class, m);
+  }
+
+  /** Convert a Json map to a ContentSummary. */
+  public static ContentSummary toContentSummary(final Map<?, ?> json) {
+    if (json == null) {
+      return null;
+    }
+
+    final Map<?, ?> m = (Map<?, ?>)json.get(ContentSummary.class.getSimpleName());
+    final long length = (Long)m.get("length");
+    final long fileCount = (Long)m.get("fileCount");
+    final long directoryCount = (Long)m.get("directoryCount");
+    final long quota = (Long)m.get("quota");
+    final long spaceConsumed = (Long)m.get("spaceConsumed");
+    final long spaceQuota = (Long)m.get("spaceQuota");
+
+    return new ContentSummary(length, fileCount, directoryCount,
+        quota, spaceConsumed, spaceQuota);
+  }
+
+  /** Convert a MD5MD5CRC32FileChecksum to a Json string. */
+  public static String toJsonString(final MD5MD5CRC32FileChecksum checksum) {
+    if (checksum == null) {
+      return null;
+    }
+
+    final Map<String, Object> m = new TreeMap<String, Object>();
+    m.put("algorithm", checksum.getAlgorithmName());
+    m.put("length", checksum.getLength());
+    m.put("bytes", StringUtils.byteToHexString(checksum.getBytes()));
+    return toJsonString(MD5MD5CRC32FileChecksum.class, m);
+  }
+
+  /** Convert a Json map to a MD5MD5CRC32FileChecksum. */
+  public static MD5MD5CRC32FileChecksum toMD5MD5CRC32FileChecksum(
+      final Map<?, ?> json) throws IOException {
+    if (json == null) {
+      return null;
+    }
+
+    final Map<?, ?> m = (Map<?, ?>)json.get(
+        MD5MD5CRC32FileChecksum.class.getSimpleName());
+    final String algorithm = (String)m.get("algorithm");
+    final int length = (int)(long)(Long)m.get("length");
+    final byte[] bytes = StringUtils.hexStringToByte((String)m.get("bytes"));
+
+    final DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes));
+    final int bytesPerCRC = in.readInt();
+    final long crcPerBlock = in.readLong();
+    final MD5Hash md5 = MD5Hash.read(in);
+    final MD5MD5CRC32FileChecksum checksum = new MD5MD5CRC32FileChecksum(
+        bytesPerCRC, crcPerBlock, md5);
+
+    //check algorithm name
+    final String alg = "MD5-of-" + crcPerBlock + "MD5-of-" + bytesPerCRC + "CRC32";
+    if (!alg.equals(algorithm)) {
+      throw new IOException("Algorithm not matched: algorithm=" + algorithm
+          + ", crcPerBlock=" + crcPerBlock
+          + ", bytesPerCRC=" + bytesPerCRC);
+    }
+    //check length
+    if (length != checksum.getLength()) {
+      throw new IOException("Length not matched: length=" + length
+          + ", checksum.getLength()=" + checksum.getLength());
+    }
+
+    return checksum;
+  }
 }
\ No newline at end of file

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1179484&r1=1179483&r2=1179484&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Thu Oct 6 01:16:48 2011
@@ -33,10 +33,12 @@ import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
@@ -278,7 +280,7 @@ public class WebHdfsFileSystem extends H
    final HttpOpParam.Op op = PutOpParam.Op.MKDIRS;
    final Map<String, Object> json = run(op, f,
        new PermissionParam(applyUMask(permission)));
-    return (Boolean)json.get(op.toString());
+    return (Boolean)json.get("boolean");
  }
 
  @Override
@@ -287,7 +289,7 @@ public class WebHdfsFileSystem extends H
    final HttpOpParam.Op op = PutOpParam.Op.RENAME;
    final Map<String, Object> json = run(op, src,
        new DstPathParam(makeQualified(dst).toUri().getPath()));
-    return (Boolean)json.get(op.toString());
+    return (Boolean)json.get("boolean");
  }
 
  @SuppressWarnings("deprecation")
@@ -327,7 +329,7 @@ public class WebHdfsFileSystem extends H
    final HttpOpParam.Op op = PutOpParam.Op.SETREPLICATION;
    final Map<String, Object> json = run(op, p,
        new ReplicationParam(replication));
-    return (Boolean)json.get(op.toString());
+    return (Boolean)json.get("boolean");
  }
 
  @Override
@@ -384,7 +386,7 @@ public class WebHdfsFileSystem extends H
  public boolean delete(Path f, boolean recursive) throws IOException {
    final HttpOpParam.Op op = DeleteOpParam.Op.DELETE;
    final Map<String, Object> json = run(op, f, new RecursiveParam(recursive));
-    return (Boolean)json.get(op.toString());
+    return (Boolean)json.get("boolean");
  }
 
  @Override
@@ -401,7 +403,9 @@ public class WebHdfsFileSystem extends H
    statistics.incrementReadOps(1);
 
    final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
-    final Object[] array = run(op, f);
+    final Map<?, ?> json = run(op, f);
+    final Object[] array = (Object[])json.get(
+        HdfsFileStatus[].class.getSimpleName());
 
    //convert FileStatus
    final FileStatus[] statuses = new FileStatus[array.length];
@@ -449,4 +453,23 @@ public class WebHdfsFileSystem extends H
        new LengthParam(length));
    return DFSUtil.locatedBlocks2Locations(JsonUtil.toLocatedBlocks(m));
  }
+
+  @Override
+  public ContentSummary getContentSummary(final Path p) throws IOException {
+    statistics.incrementReadOps(1);
+
+    final HttpOpParam.Op op = GetOpParam.Op.GETCONTENTSUMMARY;
+    final Map<String, Object> m = run(op, p);
+    return JsonUtil.toContentSummary(m);
+  }
+
+  @Override
+  public MD5MD5CRC32FileChecksum getFileChecksum(final Path p
+      ) throws IOException {
+    statistics.incrementReadOps(1);
+
+    final HttpOpParam.Op op = GetOpParam.Op.GETFILECHECKSUM;
+    final Map<String, Object> m = run(op, p);
+    return JsonUtil.toMD5MD5CRC32FileChecksum(m);
+  }
 }
\ No newline at end of file

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java?rev=1179484&r1=1179483&r2=1179484&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java Thu Oct 6 01:16:48 2011
@@ -28,6 +28,8 @@ public class GetOpParam extends HttpOpPa
    GETFILESTATUS(HttpURLConnection.HTTP_OK),
    LISTSTATUS(HttpURLConnection.HTTP_OK),
+    GETCONTENTSUMMARY(HttpURLConnection.HTTP_OK),
+    GETFILECHECKSUM(HttpURLConnection.HTTP_OK),
 
    GETDELEGATIONTOKEN(HttpURLConnection.HTTP_OK),

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Oct 6 01:16:48 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1152502-1177128
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1152502-1179483
 /hadoop/core/branches/branch-0.19/hdfs/src/main/native:713112
 /hadoop/core/branches/branch-0.19/mapred/src/c++/libhdfs:713112
 /hadoop/core/trunk/src/c++/libhdfs:776175-784663

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/packages/deb/hadoop.control/preinst
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/packages/deb/hadoop.control/preinst?rev=1179484&r1=1179483&r2=1179484&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/packages/deb/hadoop.control/preinst (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/packages/deb/hadoop.control/preinst Thu Oct 6 01:16:48 2011
@@ -15,4 +15,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-/usr/sbin/useradd --comment "Hadoop HDFS" --shell /bin/bash -M -r --groups hadoop --home /var/lib/hadoop/hdfs hdfs 2> /dev/null || :
+/usr/sbin/useradd --comment "Hadoop HDFS" -u 201 --shell /bin/bash -M -r --groups hadoop --home /var/lib/hadoop/hdfs hdfs 2> /dev/null || :

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/packages/rpm/spec/hadoop-hdfs.spec
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/packages/rpm/spec/hadoop-hdfs.spec?rev=1179484&r1=1179483&r2=1179484&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/packages/rpm/spec/hadoop-hdfs.spec (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/packages/rpm/spec/hadoop-hdfs.spec Thu Oct 6 01:16:48 2011
@@ -144,8 +144,8 @@ mv ${RPM_BUILD_DIR}/%{_final_name}/share
 rm -rf ${RPM_BUILD_DIR}/%{_final_name}/etc
 
 %pre
-getent group hadoop 2>/dev/null >/dev/null || /usr/sbin/groupadd -r hadoop
-/usr/sbin/useradd --comment "Hadoop HDFS" --shell /bin/bash -M -r --groups hadoop --home %{_var_dir}/hdfs hdfs 2> /dev/null || :
+getent group hadoop 2>/dev/null >/dev/null || /usr/sbin/groupadd -g 123 -r hadoop
+/usr/sbin/useradd --comment "Hadoop HDFS" -u 201 --shell /bin/bash -M -r --groups hadoop --home %{_var_dir}/hdfs hdfs 2> /dev/null || :
 
 %post
 bash ${RPM_INSTALL_PREFIX0}/sbin/update-hdfs-env.sh \

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Oct 6 01:16:48 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1159757-1177128
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1159757-1179483
 /hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/datanode:713112
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/datanode:713112
 /hadoop/core/trunk/src/webapps/datanode:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Oct 6 01:16:48 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1152502-1177128
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1152502-1179483
 /hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/hdfs:713112
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/hdfs:713112
 /hadoop/core/trunk/src/webapps/hdfs:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Oct 6 01:16:48 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1152502-1177128
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1152502-1179483
 /hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/secondary:713112
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/secondary:713112
 /hadoop/core/trunk/src/webapps/secondary:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Oct 6 01:16:48 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1159757-1177128
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1159757-1179483
 /hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs:713112
 /hadoop/core/trunk/src/test/hdfs:776175-785643
 /hadoop/hdfs/branches/HDFS-1052/src/test/hdfs:987665-1095512

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java?rev=1179484&r1=1179483&r2=1179484&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java Thu Oct 6 01:16:48 2011
@@ -26,6 +26,7 @@ import static org.junit.Assert.fail;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
+import java.security.PrivilegedExceptionAction;
 import java.util.Random;
 
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -37,6 +38,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
 import org.junit.Test;
@@ -399,15 +401,40 @@ public class TestDistributedFileSystem {
    RAN.setSeed(seed);
 
    final Configuration conf = getTestConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
 
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem hdfs = cluster.getFileSystem();
-    final String hftpuri = "hftp://" + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+
+    final String nnAddr = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+    final UserGroupInformation current = UserGroupInformation.getCurrentUser();
+    final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
+        current.getShortUserName() + "x", new String[]{"user"});
+
+    //hftp
+    final String hftpuri = "hftp://" + nnAddr;
    System.out.println("hftpuri=" + hftpuri);
-    final FileSystem hftp = new Path(hftpuri).getFileSystem(conf);
+    final FileSystem hftp = ugi.doAs(
+        new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws Exception {
+        return new Path(hftpuri).getFileSystem(conf);
+      }
+    });
+
+    //webhdfs
+    final String webhdfsuri = WebHdfsFileSystem.SCHEME + "://" + nnAddr;
+    System.out.println("webhdfsuri=" + webhdfsuri);
+    final FileSystem webhdfs = ugi.doAs(
+        new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws Exception {
+        return new Path(webhdfsuri).getFileSystem(conf);
+      }
+    });
 
-    final String dir = "/filechecksum";
+    final Path dir = new Path("/filechecksum");
    final int block_size = 1024;
    final int buffer_size = conf.getInt("io.file.buffer.size", 4096);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
@@ -431,7 +458,8 @@ public class TestDistributedFileSystem {
      //compute checksum
      final FileChecksum hdfsfoocs = hdfs.getFileChecksum(foo);
      System.out.println("hdfsfoocs=" + hdfsfoocs);
-
+
+      //hftp
      final FileChecksum hftpfoocs = hftp.getFileChecksum(foo);
      System.out.println("hftpfoocs=" + hftpfoocs);
 
@@ -439,6 +467,14 @@ public class TestDistributedFileSystem {
      final FileChecksum qfoocs = hftp.getFileChecksum(qualified);
      System.out.println("qfoocs=" + qfoocs);
 
+      //webhdfs
+      final FileChecksum webhdfsfoocs = webhdfs.getFileChecksum(foo);
+      System.out.println("webhdfsfoocs=" + webhdfsfoocs);
+
+      final Path webhdfsqualified = new Path(webhdfsuri + dir, "foo" + n);
+      final FileChecksum webhdfs_qfoocs = webhdfs.getFileChecksum(webhdfsqualified);
+      System.out.println("webhdfs_qfoocs=" + webhdfs_qfoocs);
+
      //write another file
      final Path bar = new Path(dir, "bar" + n);
      {
@@ -454,24 +490,40 @@ public class TestDistributedFileSystem {
        assertEquals(hdfsfoocs.hashCode(), barhashcode);
        assertEquals(hdfsfoocs, barcs);
 
+        //hftp
        assertEquals(hftpfoocs.hashCode(), barhashcode);
        assertEquals(hftpfoocs, barcs);
 
        assertEquals(qfoocs.hashCode(), barhashcode);
        assertEquals(qfoocs, barcs);
+
+        //webhdfs
+        assertEquals(webhdfsfoocs.hashCode(), barhashcode);
+        assertEquals(webhdfsfoocs, barcs);
+
+        assertEquals(webhdfs_qfoocs.hashCode(), barhashcode);
+        assertEquals(webhdfs_qfoocs, barcs);
      }
 
+      hdfs.setPermission(dir, new FsPermission((short)0));
      { //test permission error on hftp
-        hdfs.setPermission(new Path(dir), new FsPermission((short)0));
        try {
-          final String username = UserGroupInformation.getCurrentUser().getShortUserName() + "1";
-          final HftpFileSystem hftp2 = cluster.getHftpFileSystemAs(username, conf, 0, "somegroup");
-          hftp2.getFileChecksum(qualified);
+          hftp.getFileChecksum(qualified);
          fail();
        } catch(IOException ioe) {
          FileSystem.LOG.info("GOOD: getting an exception", ioe);
        }
      }
+
+      { //test permission error on webhdfs
+        try {
+          webhdfs.getFileChecksum(webhdfsqualified);
+          fail();
+        } catch(IOException ioe) {
+          FileSystem.LOG.info("GOOD: getting an exception", ioe);
+        }
+      }
+      hdfs.setPermission(dir, new FsPermission((short)0777));
    }
    cluster.shutdown();
  }

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java?rev=1179484&r1=1179483&r2=1179484&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java Thu Oct 6 01:16:48 2011
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.protocol.N
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.Test;
@@ -770,6 +771,11 @@ public class TestQuota {
    }
  }
 
+  private static void checkContentSummary(final ContentSummary expected,
+      final ContentSummary computed) {
+    assertEquals(expected.toString(), computed.toString());
+  }
+
  /**
   * Violate a space quota using files of size < 1 block. Test that block
   * allocation conservatively assumes that for quota checking the entire
   *
@@ -781,12 +787,18 @@ public class TestQuota {
    Configuration conf = new HdfsConfiguration();
    final int BLOCK_SIZE = 6 * 1024;
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    DFSAdmin admin = new DFSAdmin(conf);
-
+
+    final String nnAddr = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+    final String webhdfsuri = WebHdfsFileSystem.SCHEME + "://" + nnAddr;
+    System.out.println("webhdfsuri=" + webhdfsuri);
+    final FileSystem webhdfs = new Path(webhdfsuri).getFileSystem(conf);
+
    try {
      Path dir = new Path("/test");
      Path file1 = new Path("/test/test1");
@@ -806,6 +818,7 @@ public class TestQuota {
      DFSTestUtil.createFile(fs, file1, FILE_SIZE, (short) 3, 1L);
      DFSTestUtil.waitReplication(fs, file1, (short) 3);
      c = fs.getContentSummary(dir);
+      checkContentSummary(c, webhdfs.getContentSummary(dir));
      assertEquals("Quota is half consumed", QUOTA_SIZE / 2,
                   c.getSpaceConsumed());
 
@@ -836,12 +849,18 @@ public class TestQuota {
    Configuration conf = new HdfsConfiguration();
    final int BLOCK_SIZE = 6 * 1024;
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
    MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    DFSAdmin admin = new DFSAdmin(conf);
 
+    final String nnAddr = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+    final String webhdfsuri = WebHdfsFileSystem.SCHEME + "://" + nnAddr;
+    System.out.println("webhdfsuri=" + webhdfsuri);
+    final FileSystem webhdfs = new Path(webhdfsuri).getFileSystem(conf);
+
    try {
 
      //Test for deafult NameSpace Quota
@@ -882,6 +901,7 @@ public class TestQuota {
 
      // Should account for all 59 files (almost QUOTA_SIZE)
      c = fs.getContentSummary(dir);
+      checkContentSummary(c, webhdfs.getContentSummary(dir));
      assertEquals("Invalid space consumed", 59 * FILE_SIZE * 3,
                   c.getSpaceConsumed());
      assertEquals("Invalid space consumed", QUOTA_SIZE - (59 * FILE_SIZE * 3),

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java?rev=1179484&r1=1179483&r2=1179484&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java Thu Oct 6 01:16:48 2011
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.na
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Matchers.anyInt;
+import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.spy;
@@ -33,6 +34,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -73,7 +75,7 @@ public class TestEditLogJournalFailures
  public void testSingleFailedEditsDirOnFlush() throws IOException {
    assertTrue(doAnEdit());
    // Invalidate one edits journal.
-    invalidateEditsDirAtIndex(0, true);
+    invalidateEditsDirAtIndex(0, true, false);
    // Make sure runtime.exit(...) hasn't been called at all yet.
    assertExitInvocations(0);
    assertTrue(doAnEdit());
@@ -86,8 +88,22 @@ public class TestEditLogJournalFailures
  public void testAllEditsDirsFailOnFlush() throws IOException {
    assertTrue(doAnEdit());
    // Invalidate both edits journals.
-    invalidateEditsDirAtIndex(0, true);
-    invalidateEditsDirAtIndex(1, true);
+    invalidateEditsDirAtIndex(0, true, false);
+    invalidateEditsDirAtIndex(1, true, false);
+    // Make sure runtime.exit(...) hasn't been called at all yet.
+    assertExitInvocations(0);
+    assertTrue(doAnEdit());
+    // The previous edit could not be synced to any persistent storage, should
+    // have halted the NN.
+    assertExitInvocations(1);
+  }
+
+  @Test
+  public void testAllEditsDirFailOnWrite() throws IOException {
+    assertTrue(doAnEdit());
+    // Invalidate both edits journals.
+    invalidateEditsDirAtIndex(0, true, true);
+    invalidateEditsDirAtIndex(1, true, true);
    // Make sure runtime.exit(...) hasn't been called at all yet.
    assertExitInvocations(0);
    assertTrue(doAnEdit());
@@ -100,7 +116,7 @@ public class TestEditLogJournalFailures
  public void testSingleFailedEditsDirOnSetReadyToFlush() throws IOException {
    assertTrue(doAnEdit());
    // Invalidate one edits journal.
-    invalidateEditsDirAtIndex(0, false);
+    invalidateEditsDirAtIndex(0, false, false);
    // Make sure runtime.exit(...) hasn't been called at all yet.
    assertExitInvocations(0);
    assertTrue(doAnEdit());
@@ -117,16 +133,18 @@ public class TestEditLogJournalFailures
   * @return the original <code>EditLogOutputStream</code> of the journal.
   */
  private EditLogOutputStream invalidateEditsDirAtIndex(int index,
-      boolean failOnFlush) throws IOException {
+      boolean failOnFlush, boolean failOnWrite) throws IOException {
    FSImage fsimage = cluster.getNamesystem().getFSImage();
    FSEditLog editLog = fsimage.getEditLog();
-
-    FSEditLog.JournalAndStream jas = editLog.getJournals().get(index);
+    JournalAndStream jas = editLog.getJournals().get(index);
    EditLogFileOutputStream elos =
      (EditLogFileOutputStream) jas.getCurrentStream();
    EditLogFileOutputStream spyElos = spy(elos);
-
+    if (failOnWrite) {
+      doThrow(new IOException("fail on write()")).when(spyElos).write(
+          (FSEditLogOp) any());
+    }
    if (failOnFlush) {
      doThrow(new IOException("fail on flush()")).when(spyElos).flush();
    } else {
@@ -151,7 +169,7 @@ public class TestEditLogJournalFailures
    FSImage fsimage = cluster.getNamesystem().getFSImage();
    FSEditLog editLog = fsimage.getEditLog();
 
-    FSEditLog.JournalAndStream jas = editLog.getJournals().get(index);
+    JournalAndStream jas = editLog.getJournals().get(index);
    jas.setCurrentStreamForTests(elos);
  }

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java?rev=1179484&r1=1179483&r2=1179484&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java Thu Oct 6 01:16:48 2011
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.log4j.Level;
 
@@ -356,7 +357,7 @@ public class TestEditLogRace {
    FSImage fsimage = namesystem.getFSImage();
    FSEditLog editLog = fsimage.getEditLog();
 
-    FSEditLog.JournalAndStream jas = editLog.getJournals().get(0);
+    JournalAndStream jas = editLog.getJournals().get(0);
    EditLogFileOutputStream spyElos =
        spy((EditLogFileOutputStream)jas.getCurrentStream());
    jas.setCurrentStreamForTests(spyElos);

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java?rev=1179484&r1=1179483&r2=1179484&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java Thu Oct 6 01:16:48 2011
@@ -28,7 +28,6 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.Set;
 
-import static org.mockito.Matchers.anyByte;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.spy;
 
@@ -45,7 +44,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
 
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
@@ -123,7 +122,7 @@ public class TestStorageRestore {
      // simulate an error
      fi.getStorage().reportErrorsOnDirectories(al);
 
-      for (FSEditLog.JournalAndStream j : fi.getEditLog().getJournals()) {
+      for (JournalAndStream j : fi.getEditLog().getJournals()) {
        if (j.getManager() instanceof FileJournalManager) {
          FileJournalManager fm = (FileJournalManager)j.getManager();
          if (fm.getStorageDirectory().getRoot().equals(path2)

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java?rev=1179484&r1=1179483&r2=1179484&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java Thu Oct 6 01:16:48 2011
@@ -17,14 +17,16 @@
  */
 package org.apache.hadoop.hdfs.web;
 
+import java.util.Map;
+
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.junit.Assert;
 import org.junit.Test;
+import org.mortbay.util.ajax.JSON;
 
 public class TestJsonUtil {
  static FileStatus toFileStatus(HdfsFileStatus f, String parent) {
@@ -46,7 +48,7 @@ public class TestJsonUtil {
    System.out.println("fstatus = " + fstatus);
    final String json = JsonUtil.toJsonString(status);
    System.out.println("json = " + json.replace(",", ",\n "));
-    final HdfsFileStatus s2 = JsonUtil.toFileStatus(JsonUtil.parse(json));
+    final HdfsFileStatus s2 = JsonUtil.toFileStatus((Map<?, ?>)JSON.parse(json));
    final FileStatus fs2 = toFileStatus(s2, parent);
    System.out.println("s2 = " + s2);
    System.out.println("fs2 = " + fs2);
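The JSON convention adopted throughout this change wraps each payload in an object keyed by the class's simple name, e.g. {"ContentSummary":{...}}, which is what the new toJsonString/toContentSummary pair encodes and decodes. A round-trip sketch in the style of TestJsonUtil above — the sample values are illustrative only, and it assumes the six-argument ContentSummary constructor used by the diff:

    import java.util.Map;

    import org.apache.hadoop.fs.ContentSummary;
    import org.apache.hadoop.hdfs.web.JsonUtil;
    import org.mortbay.util.ajax.JSON;

    public class ContentSummaryJsonRoundTrip {
      public static void main(String[] args) {
        // Arguments: length, fileCount, directoryCount, quota, spaceConsumed,
        // spaceQuota (sample values; -1 means "no quota set").
        final ContentSummary expected =
            new ContentSummary(1024L, 2L, 1L, -1L, 3072L, -1L);

        // Serializes to {"ContentSummary":{"directoryCount":1,"fileCount":2,...}}
        final String json = JsonUtil.toJsonString(expected);
        System.out.println("json = " + json);

        // toContentSummary unwraps the "ContentSummary" key and rebuilds the object.
        final ContentSummary computed =
            JsonUtil.toContentSummary((Map<?, ?>)JSON.parse(json));

        // ContentSummary has no equals(); compare string forms, as the
        // checkContentSummary helper in TestQuota does.
        if (!expected.toString().equals(computed.toString())) {
          throw new AssertionError("round trip mismatch: " + computed);
        }
        System.out.println("round trip OK: " + computed);
      }
    }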
