Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java Tue Jan 22 19:33:02 2013
@@ -20,9 +20,9 @@ package org.apache.hadoop.hdfs.util;
 import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileInputStream;
-import java.io.FileReader;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.InputStreamReader;
 import java.security.DigestInputStream;
 import java.security.MessageDigest;
 import java.util.regex.Matcher;
@@ -34,6 +34,8 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.util.StringUtils;
 
+import com.google.common.base.Charsets;
+
 /**
  * Static functions for dealing with files of the same format
  * that the Unix "md5sum" utility writes.
@@ -78,7 +80,8 @@ public abstract class MD5FileUtils {
     }
 
     BufferedReader reader =
-        new BufferedReader(new FileReader(md5File));
+        new BufferedReader(new InputStreamReader(new FileInputStream(
+            md5File), Charsets.UTF_8));
     try {
       md5Line = reader.readLine();
       if (md5Line == null) { md5Line = ""; }
@@ -138,7 +141,7 @@ public abstract class MD5FileUtils {
     String md5Line = digestString + " *" + dataFile.getName() + "\n";
 
     AtomicFileOutputStream afos = new AtomicFileOutputStream(md5File);
-    afos.write(md5Line.getBytes());
+    afos.write(md5Line.getBytes(Charsets.UTF_8));
     afos.close();
     LOG.debug("Saved MD5 " + digest + " to " + md5File);
   }
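
The hunks above replace charset-implicit APIs (FileReader, String.getBytes()) with explicit UTF-8, which matters on branch-trunk-win because the Windows default charset is usually not UTF-8. A minimal standalone sketch of the read side of this pattern; it uses the JDK 7 StandardCharsets constant where the patch uses Guava's Charsets.UTF_8 (the codebase predates JDK 7), the two being interchangeable:

    import java.io.BufferedReader;
    import java.io.File;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.nio.charset.StandardCharsets;

    public class Utf8ReadSketch {
      // Reads the first line of a file, decoding it as UTF-8 regardless of
      // the JVM's file.encoding. FileReader offers no way to pick a charset,
      // which is why the patch layers InputStreamReader over FileInputStream.
      static String readFirstLineUtf8(File f) throws IOException {
        BufferedReader reader = new BufferedReader(
            new InputStreamReader(new FileInputStream(f), StandardCharsets.UTF_8));
        try {
          return reader.readLine();
        } finally {
          reader.close();
        }
      }

      public static void main(String[] args) throws IOException {
        System.out.println(readFirstLineUtf8(new File(args[0])));
      }
    }
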
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java Tue Jan 22 19:33:02 2013
@@ -19,14 +19,18 @@ package org.apache.hadoop.hdfs.util;
 
 import java.io.BufferedReader;
 import java.io.File;
+import java.io.FileInputStream;
 import java.io.FileReader;
 import java.io.IOException;
+import java.io.InputStreamReader;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.IOUtils;
 
+import com.google.common.base.Charsets;
+
 /**
  * Class that represents a file on disk which persistently stores
  * a single <code>long</code> value. The file is updated atomically
@@ -74,7 +78,7 @@ public class PersistentLongFile {
   public static void writeFile(File file, long val) throws IOException {
     AtomicFileOutputStream fos = new AtomicFileOutputStream(file);
     try {
-      fos.write(String.valueOf(val).getBytes());
+      fos.write(String.valueOf(val).getBytes(Charsets.UTF_8));
       fos.write('\n');
       fos.close();
       fos = null;
@@ -88,7 +92,9 @@ public class PersistentLongFile {
   public static long readFile(File file, long defaultVal) throws IOException {
     long val = defaultVal;
     if (file.exists()) {
-      BufferedReader br = new BufferedReader(new FileReader(file));
+      BufferedReader br = 
+          new BufferedReader(new InputStreamReader(new FileInputStream(
+              file), Charsets.UTF_8));
       try {
         val = Long.valueOf(br.readLine());
         br.close();
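
The same recipe is applied to both the write and read paths of this single-value file. A hedged round-trip sketch of the PersistentLongFile idea using plain java.io; the real class writes through AtomicFileOutputStream (rename-into-place atomicity), which is elided here:

    import java.io.*;
    import java.nio.charset.StandardCharsets;

    public class LongFileSketch {
      // Persists a long as one decimal line, always encoded as UTF-8 so the
      // bytes on disk never depend on the platform default charset
      // (e.g. windows-1252 on many Windows hosts vs. UTF-8 on Linux).
      static void writeFile(File file, long val) throws IOException {
        OutputStream os = new FileOutputStream(file); // real code: AtomicFileOutputStream
        try {
          os.write(String.valueOf(val).getBytes(StandardCharsets.UTF_8));
          os.write('\n');
        } finally {
          os.close();
        }
      }

      static long readFile(File file, long defaultVal) throws IOException {
        if (!file.exists()) {
          return defaultVal;
        }
        BufferedReader br = new BufferedReader(new InputStreamReader(
            new FileInputStream(file), StandardCharsets.UTF_8));
        try {
          return Long.parseLong(br.readLine());
        } finally {
          br.close();
        }
      }

      public static void main(String[] args) throws IOException {
        File f = new File("seen_txid.tmp"); // illustrative file name
        writeFile(f, 42L);
        System.out.println(readFile(f, -1L)); // prints 42 on any platform
      }
    }
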
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Tue Jan 22 19:33:02 2013
@@ -105,6 +105,8 @@ import org.apache.hadoop.security.token.
 import org.apache.hadoop.util.Progressable;
 import org.mortbay.util.ajax.JSON;
 
+import com.google.common.base.Charsets;
+
 /** A FileSystem for HDFS over the web.
  */
 public class WebHdfsFileSystem extends FileSystem
     implements DelegationTokenRenewer.Renewable {
@@ -281,7 +283,7 @@ public class WebHdfsFileSystem extends F
           + "\" (parsed=\"" + parsed + "\")");
       }
     }
-    return (Map<?, ?>)JSON.parse(new InputStreamReader(in));
+    return (Map<?, ?>)JSON.parse(new InputStreamReader(in, Charsets.UTF_8));
   }
 
   private static Map<?, ?> validateResponse(final HttpOpParam.Op op,

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1428602-1437112

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c Tue Jan 22 19:33:02 2013
@@ -52,7 +52,7 @@ struct NativeMiniDfsCluster* nmdCreate(s
     if (!env) {
         fprintf(stderr, "nmdCreate: unable to construct JNIEnv.\n");
-        goto error;
+        return NULL;
     }
     cl = calloc(1, sizeof(struct NativeMiniDfsCluster));
     if (!cl) {

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto Tue Jan 22 19:33:02 2013
@@ -25,6 +25,7 @@ option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 package hadoop.hdfs;
 
+import "Security.proto";
 import "hdfs.proto";
 
 /**
@@ -76,7 +77,7 @@ message DeleteBlockPoolResponseProto {
  */
 message GetBlockLocalPathInfoRequestProto {
   required ExtendedBlockProto block = 1;
-  required BlockTokenIdentifierProto token = 2;
+  required hadoop.common.TokenProto token = 2;
 }
 
 /**
@@ -96,7 +97,7 @@ message GetBlockLocalPathInfoResponsePro
  */
 message GetHdfsBlockLocationsRequestProto {
   repeated ExtendedBlockProto blocks = 1;
-  repeated BlockTokenIdentifierProto tokens = 2;
+  repeated hadoop.common.TokenProto tokens = 2;
 }
 
 /**
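
The proto changes begin the switch from the HDFS-private BlockTokenIdentifierProto to the shared hadoop.common.TokenProto defined in Security.proto. The four fields line up one for one (identifier, password, kind, service), so a converter along the lines of PBHelper.convert is mechanical. A sketch, assuming the standard protoc-generated Java builder API for TokenProto and a hadoop-common classpath:

    import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenIdentifier;

    import com.google.protobuf.ByteString;

    public class TokenProtoSketch {
      // Maps a generic Hadoop Token onto the shared wire type. The four
      // fields mirror the removed BlockTokenIdentifierProto, which is what
      // makes the swap straightforward for every protocol touched here.
      public static TokenProto convert(Token<? extends TokenIdentifier> tok) {
        return TokenProto.newBuilder()
            .setIdentifier(ByteString.copyFrom(tok.getIdentifier()))
            .setPassword(ByteString.copyFrom(tok.getPassword()))
            .setKind(tok.getKind().toString())
            .setService(tok.getService().toString())
            .build();
      }
    }
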
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Tue Jan 22 19:33:02 2013
@@ -22,6 +22,7 @@ option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 package hadoop.hdfs;
 
+import "Security.proto";
 import "hdfs.proto";
 
 /**
@@ -167,7 +168,7 @@ message RenameRequestProto {
   required string dst = 2;
 }
 
-message RenameResponseProto { // void response
+message RenameResponseProto {
+  required bool result = 1;
 }
 
@@ -386,7 +387,7 @@ message GetLinkTargetRequestProto {
   required string path = 1;
 }
 message GetLinkTargetResponseProto {
-  required string targetPath = 1;
+  optional string targetPath = 1;
 }
 
 message UpdateBlockForPipelineRequestProto {
@@ -408,29 +409,6 @@ message UpdatePipelineRequestProto {
 message UpdatePipelineResponseProto { // void response
 }
 
-message GetDelegationTokenRequestProto {
-  required string renewer = 1;
-}
-
-message GetDelegationTokenResponseProto {
-  required BlockTokenIdentifierProto token = 1;
-}
-
-message RenewDelegationTokenRequestProto {
-  required BlockTokenIdentifierProto token = 1;
-}
-
-message RenewDelegationTokenResponseProto {
-  required uint64 newExireTime = 1;
-}
-
-message CancelDelegationTokenRequestProto {
-  required BlockTokenIdentifierProto token = 1;
-}
-
-message CancelDelegationTokenResponseProto { // void response
-}
-
 message SetBalancerBandwidthRequestProto {
   required int64 bandwidth = 1;
 }
@@ -442,7 +420,7 @@ message GetDataEncryptionKeyRequestProto
 }
 
 message GetDataEncryptionKeyResponseProto {
-  required DataEncryptionKeyProto dataEncryptionKey = 1;
+  optional DataEncryptionKeyProto dataEncryptionKey = 1;
 }
 
 service ClientNamenodeProtocol {
@@ -508,12 +486,12 @@ service ClientNamenodeProtocol {
       returns(UpdateBlockForPipelineResponseProto);
   rpc updatePipeline(UpdatePipelineRequestProto)
       returns(UpdatePipelineResponseProto);
-  rpc getDelegationToken(GetDelegationTokenRequestProto)
-      returns(GetDelegationTokenResponseProto);
-  rpc renewDelegationToken(RenewDelegationTokenRequestProto)
-      returns(RenewDelegationTokenResponseProto);
-  rpc cancelDelegationToken(CancelDelegationTokenRequestProto)
-      returns(CancelDelegationTokenResponseProto);
+  rpc getDelegationToken(hadoop.common.GetDelegationTokenRequestProto)
+      returns(hadoop.common.GetDelegationTokenResponseProto);
+  rpc renewDelegationToken(hadoop.common.RenewDelegationTokenRequestProto)
+      returns(hadoop.common.RenewDelegationTokenResponseProto);
+  rpc cancelDelegationToken(hadoop.common.CancelDelegationTokenRequestProto)
+      returns(hadoop.common.CancelDelegationTokenResponseProto);
   rpc setBalancerBandwidth(SetBalancerBandwidthRequestProto)
       returns(SetBalancerBandwidthResponseProto);
   rpc getDataEncryptionKey(GetDataEncryptionKeyRequestProto)
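
Beyond moving the delegation-token messages into the shared hadoop.common namespace, two response fields here (targetPath, dataEncryptionKey) are relaxed from required to optional, so proto2 callers now have to consult the generated hasX() presence bit before calling getX(). A sketch of that check; the outer class name ClientNamenodeProtocolProtos is assumed to be the stock protoc output for this file:

    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto;

    public class OptionalFieldSketch {
      // proto2 optional fields pair a hasX() presence bit with getX();
      // calling getTargetPath() on an absent field would just return the
      // empty default, so the presence check is what preserves the old
      // "no target" semantics for callers.
      static String targetPathOrNull(GetLinkTargetResponseProto resp) {
        return resp.hasTargetPath() ? resp.getTargetPath() : null;
      }
    }
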
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto Tue Jan 22 19:33:02 2013
@@ -56,7 +56,7 @@ message GetBlockKeysRequestProto {
  * keys - Information about block keys at the active namenode
  */
 message GetBlockKeysResponseProto {
-  required ExportedBlockKeysProto keys = 1;
+  optional ExportedBlockKeysProto keys = 1;
 }
 
 /**

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto Tue Jan 22 19:33:02 2013
@@ -24,6 +24,7 @@ option java_outer_classname = "DataTrans
 option java_generate_equals_and_hash = true;
 package hadoop.hdfs;
 
+import "Security.proto";
 import "hdfs.proto";
 
 message DataTransferEncryptorMessageProto {
@@ -39,7 +40,7 @@ message DataTransferEncryptorMessageProt
 
 message BaseHeaderProto {
   required ExtendedBlockProto block = 1;
-  optional BlockTokenIdentifierProto token = 2;
+  optional hadoop.common.TokenProto token = 2;
 }
 
 message ClientOperationHeaderProto {
@@ -51,6 +52,7 @@ message OpReadBlockProto {
   required ClientOperationHeaderProto header = 1;
   required uint64 offset = 2;
   required uint64 len = 3;
+  optional bool sendChecksums = 4 [default = true];
 }
 
 
@@ -181,5 +183,5 @@ message OpBlockChecksumResponseProto {
   required uint32 bytesPerCrc = 1;
   required uint64 crcPerBlock = 2;
   required bytes md5 = 3;
-  optional ChecksumTypeProto crcType = 4 [default = CHECKSUM_CRC32];
+  optional ChecksumTypeProto crcType = 4;
 }
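
Two compatibility-minded details in this file: OpReadBlockProto.sendChecksums carries a proto-level default of true, so requests serialized by older clients that never set field 4 still decode as "send checksums"; crcType, by contrast, loses its proto-level default, pushing the backward-compatibility fallback into code. A sketch of both read paths, assuming the protoc-generated accessors under the DataTransferProtos outer class named in the hunk header:

    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto;

    public class ReadFlagsSketch {
      // Field 4 has [default = true], so getSendChecksums() reports true for
      // any request from a pre-patch client that never sets the field.
      static boolean wantsChecksums(OpReadBlockProto op) {
        return op.getSendChecksums();
      }

      // crcType no longer declares a default in the .proto, so the
      // legacy-CRC32 fallback for old servers moves into the caller.
      static ChecksumTypeProto crcTypeOrLegacyDefault(
          OpBlockChecksumResponseProto r) {
        return r.hasCrcType() ? r.getCrcType()
                              : ChecksumTypeProto.CHECKSUM_CRC32;
      }
    }
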
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Tue Jan 22 19:33:02 2013
@@ -19,11 +19,14 @@
 
 // This file contains protocol buffers that are used throughout HDFS -- i.e.
 // by the client, server, and data transfer protocols.
+
 option java_package = "org.apache.hadoop.hdfs.protocol.proto";
 option java_outer_classname = "HdfsProtos";
 option java_generate_equals_and_hash = true;
 package hadoop.hdfs;
 
+import "Security.proto";
+
 /**
  * Extended block idenfies a block
  */
@@ -36,16 +39,6 @@ message ExtendedBlockProto {
 }
 
 /**
- * Block Token
- */
-message BlockTokenIdentifierProto {
-  required bytes identifier = 1;
-  required bytes password = 2;
-  required string kind = 3;
-  required string service = 4;
-}
-
-/**
  * Identifies a Datanode
  */
 message DatanodeIDProto {
@@ -126,7 +119,7 @@ message LocatedBlockProto {
   // If block has few corrupt replicas, they are filtered and
   // their locations are not part of this object
 
-  required BlockTokenIdentifierProto blockToken = 5;
+  required hadoop.common.TokenProto blockToken = 5;
 }
 
 message DataEncryptionKeyProto {

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier Tue Jan 22 19:33:02 2013
@@ -1,2 +1,15 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier
 org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer Tue Jan 22 19:33:02 2013
@@ -1,3 +1,16 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 org.apache.hadoop.hdfs.DFSClient$Renewer
 org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier$Renewer
 org.apache.hadoop.hdfs.HftpFileSystem$TokenManager

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1428602-1437112

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1428602-1437112

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1428602-1437112

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1428602-1437112

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Tue Jan 22 19:33:02 2013
@@ -48,7 +48,6 @@ import java.io.IOException;
 import java.io.PrintWriter;
 import java.io.RandomAccessFile;
 import java.net.InetSocketAddress;
-import java.net.ServerSocket;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.nio.channels.FileChannel;
@@ -2290,19 +2289,6 @@ public class MiniDFSCluster {
     return nameNodes[nnIndex].nameNode;
   }
 
-  private int getFreeSocketPort() {
-    int port = 0;
-    try {
-      ServerSocket s = new ServerSocket(0);
-      port = s.getLocalPort();
-      s.close();
-      return port;
-    } catch (IOException e) {
-      // Could not get a free port. Return default port 0.
-    }
-    return port;
-  }
-
   protected void setupDatanodeAddress(Configuration conf, boolean setupHostsFile,
                            boolean checkDataNodeAddrConfig) throws IOException {
     if (setupHostsFile) {
@@ -2311,7 +2297,7 @@ public class MiniDFSCluster {
         throw new IOException("Parameter dfs.hosts is not setup in conf");
       }
       // Setup datanode in the include file, if it is defined in the conf
-      String address = "127.0.0.1:" + getFreeSocketPort();
+      String address = "127.0.0.1:" + NetUtils.getFreeSocketPort();
      if (checkDataNodeAddrConfig) {
        conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, address);
      } else {
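
The deleted private helper is replaced by the equivalent NetUtils.getFreeSocketPort() from hadoop-common. The idiom it centralizes, sketched standalone:

    import java.io.IOException;
    import java.net.ServerSocket;

    public class FreePortSketch {
      // Binding port 0 asks the OS to pick an ephemeral port; we record it,
      // release it, and hand it back. This mirrors the removed private
      // helper and what NetUtils.getFreeSocketPort() provides centrally.
      // Note the inherent race: another process may grab the port between
      // close() and its eventual reuse.
      static int getFreeSocketPort() {
        try {
          ServerSocket s = new ServerSocket(0);
          try {
            return s.getLocalPort();
          } finally {
            s.close();
          }
        } catch (IOException e) {
          return 0; // fall back to "any port", matching the old helper
        }
      }

      public static void main(String[] args) {
        System.out.println(getFreeSocketPort());
      }
    }
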
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java Tue Jan 22 19:33:02 2013
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
+import static org.junit.Assert.*;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
@@ -31,6 +32,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
@@ -176,6 +178,44 @@ public class TestDFSRollback {
       cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
+
+      log("Normal BlockPool rollback", numDirs);
+      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
+      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+          .format(false)
+          .manageDataDfsDirs(false)
+          .manageNameDfsDirs(false)
+          .startupOption(StartupOption.ROLLBACK)
+          .build();
+      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
+      UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "current",
+          UpgradeUtilities.getCurrentBlockPoolID(cluster));
+      // Create a previous snapshot for the blockpool
+      UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "previous",
+          UpgradeUtilities.getCurrentBlockPoolID(cluster));
+      // Older LayoutVersion to make it rollback
+      storageInfo = new StorageInfo(
+          UpgradeUtilities.getCurrentLayoutVersion()+1,
+          UpgradeUtilities.getCurrentNamespaceID(cluster),
+          UpgradeUtilities.getCurrentClusterID(cluster),
+          UpgradeUtilities.getCurrentFsscTime(cluster));
+      // Create old VERSION file for each data dir
+      for (int i=0; i<dataNodeDirs.length; i++) {
+        Path bpPrevPath = new Path(dataNodeDirs[i] + "/current/" +
+            UpgradeUtilities.getCurrentBlockPoolID(cluster));
+        UpgradeUtilities.createBlockPoolVersionFile(
+            new File(bpPrevPath.toString()),
+            storageInfo,
+            UpgradeUtilities.getCurrentBlockPoolID(cluster));
+      }
+
+      cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
+      assertTrue(cluster.isDataNodeUp());
+
+      cluster.shutdown();
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
 
       log("NameNode rollback without existing previous dir", numDirs);
       UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java Tue Jan 22 19:33:02 2013
@@ -444,21 +444,21 @@ public class TestDataTransferProtocol {
     recvBuf.reset();
     blk.setBlockId(blkid-1);
     sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-        0L, fileLen);
+        0L, fileLen, true);
     sendRecvData("Wrong block ID " + newBlockId + " for read", false);
 
     // negative block start offset -1L
     sendBuf.reset();
     blk.setBlockId(blkid);
     sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-        -1L, fileLen);
+        -1L, fileLen, true);
     sendRecvData("Negative start-offset for read for block " +
         firstBlock.getBlockId(), false);
 
     // bad block start offset
     sendBuf.reset();
     sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-        fileLen, fileLen);
+        fileLen, fileLen, true);
     sendRecvData("Wrong start-offset for reading block " +
         firstBlock.getBlockId(), false);
@@ -475,7 +475,7 @@ public class TestDataTransferProtocol {
 
     sendBuf.reset();
     sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-        0L, -1L-random.nextInt(oneMil));
+        0L, -1L-random.nextInt(oneMil), true);
     sendRecvData("Negative length for reading block " +
         firstBlock.getBlockId(), false);
@@ -488,14 +488,14 @@ public class TestDataTransferProtocol {
         recvOut);
     sendBuf.reset();
     sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-        0L, fileLen+1);
+        0L, fileLen+1, true);
     sendRecvData("Wrong length for reading block " +
         firstBlock.getBlockId(), false);
 
     //At the end of all this, read the file to make sure that succeeds finally.
     sendBuf.reset();
     sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-        0L, fileLen);
+        0L, fileLen, true);
     readFile(fileSys, file, fileLen);
     } finally {
       cluster.shutdown();

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java Tue Jan 22 19:33:02 2013
@@ -158,7 +158,7 @@ public class TestLargeBlock {
    * Test for block size of 2GB + 512B
    * @throws IOException in case of errors
    */
-  @Test
+  @Test(timeout = 120000)
   public void testLargeBlockSize() throws IOException {
     final long blockSize = 2L * 1024L * 1024L * 1024L + 512L; // 2GB + 512B
     runTest(blockSize);

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java Tue Jan 22 19:33:02 2013
@@ -19,6 +19,9 @@ package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
 
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
+import org.apache.log4j.Level;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -56,4 +59,11 @@ public class TestParallelRead extends Te
   public void testParallelReadMixed() throws IOException {
     runTestWorkload(new MixedWorkloadHelper());
   }
+
+  @Test
+  public void testParallelNoChecksums() throws IOException {
+    verifyChecksums = false;
+    runTestWorkload(new MixedWorkloadHelper());
+  }
+
 }

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java Tue Jan 22 19:33:02 2013
@@ -46,6 +46,7 @@ public class TestParallelReadUtil {
   static final int FILE_SIZE_K = 256;
   static Random rand = null;
   static final int DEFAULT_REPLICATION_FACTOR = 2;
+  protected boolean verifyChecksums = true;
 
   static {
     // The client-trace log ends up causing a lot of blocking threads
@@ -317,7 +318,8 @@ public class TestParallelReadUtil {
 
       testInfo.filepath = new Path("/TestParallelRead.dat." + i);
       testInfo.authenticData = util.writeFile(testInfo.filepath, FILE_SIZE_K);
-      testInfo.dis = dfsClient.open(testInfo.filepath.toString());
+      testInfo.dis = dfsClient.open(testInfo.filepath.toString(),
+          dfsClient.dfsClientConf.ioBufferSize, verifyChecksums);
 
       for (int j = 0; j < nWorkerEach; ++j) {
         workers[nWorkers++] = new ReadWorker(testInfo, nWorkers, helper);

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java Tue Jan 22 19:33:02 2013
@@ -24,11 +24,14 @@ import java.io.DataOutputStream;
 import java.io.IOException;
 import java.util.Random;
 
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.log4j.Level;
 import org.junit.Test;
 
 /**
@@ -194,11 +197,19 @@ public class TestPread {
    */
   @Test
   public void testPreadDFS() throws IOException {
-    dfsPreadTest(false); //normal pread
-    dfsPreadTest(true); //trigger read code path without transferTo.
+    dfsPreadTest(false, true); //normal pread
+    dfsPreadTest(true, true); //trigger read code path without transferTo.
   }
-  private void dfsPreadTest(boolean disableTransferTo) throws IOException {
+
+  @Test
+  public void testPreadDFSNoChecksum() throws IOException {
+    ((Log4JLogger)DataTransferProtocol.LOG).getLogger().setLevel(Level.ALL);
+    dfsPreadTest(false, false);
+    dfsPreadTest(true, false);
+  }
+
+  private void dfsPreadTest(boolean disableTransferTo, boolean verifyChecksum)
+      throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
     conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096);
@@ -210,6 +221,7 @@ public class TestPread {
     }
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     FileSystem fileSys = cluster.getFileSystem();
+    fileSys.setVerifyChecksum(verifyChecksum);
    try {
      Path file1 = new Path("preadtest.dat");
      writeFile(fileSys, file1);
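
The read-path test changes above all exercise one new capability: reading HDFS data without checksum verification (verifyChecksums in the client tests, sendChecksums on the wire). A usage sketch against the public FileSystem API; the path and buffer size are illustrative:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class NoChecksumReadSketch {
      // Reads a file twice: once with end-to-end checksum verification (the
      // default) and once without. Skipping verification trades corruption
      // detection for less datanode I/O and CPU, which is the point of the
      // new sendChecksums flag on OpReadBlockProto.
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path p = new Path(args[0]);
        byte[] buf = new byte[4096];

        FSDataInputStream in = fs.open(p);
        in.read(buf);          // checksums verified
        in.close();

        fs.setVerifyChecksum(false);
        in = fs.open(p);
        in.read(buf);          // datanode may skip sending checksums
        in.close();
      }
    }
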
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java Tue Jan 22 19:33:02 2013
@@ -31,12 +31,12 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
@@ -69,7 +69,9 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.DataChecksum;
 import org.junit.Test;
 
 import com.google.common.base.Joiner;
@@ -374,7 +376,7 @@ public class TestPBHelper {
     Token<BlockTokenIdentifier> token = new Token<BlockTokenIdentifier>(
         "identifier".getBytes(), "password".getBytes(), new Text("kind"),
         new Text("service"));
-    BlockTokenIdentifierProto tokenProto = PBHelper.convert(token);
+    TokenProto tokenProto = PBHelper.convert(token);
     Token<BlockTokenIdentifier> token2 = PBHelper.convert(tokenProto);
     compare(token, token2);
   }
@@ -403,30 +405,74 @@ public class TestPBHelper {
     assertEquals(expected.getKind(), actual.getKind());
     assertEquals(expected.getService(), actual.getService());
   }
-
-  @Test
-  public void testConvertLocatedBlock() {
-    DatanodeInfo [] dnInfos = {
-        DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1", AdminStates.DECOMMISSION_INPROGRESS),
-        DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2", AdminStates.DECOMMISSIONED),
-        DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", AdminStates.NORMAL)
+
+  private void compare(LocatedBlock expected, LocatedBlock actual) {
+    assertEquals(expected.getBlock(), actual.getBlock());
+    compare(expected.getBlockToken(), actual.getBlockToken());
+    assertEquals(expected.getStartOffset(), actual.getStartOffset());
+    assertEquals(expected.isCorrupt(), actual.isCorrupt());
+    DatanodeInfo [] ei = expected.getLocations();
+    DatanodeInfo [] ai = actual.getLocations();
+    assertEquals(ei.length, ai.length);
+    for (int i = 0; i < ei.length ; i++) {
+      compare(ei[i], ai[i]);
+    }
+  }
+
+  private LocatedBlock createLocatedBlock() {
+    DatanodeInfo[] dnInfos = {
+        DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
            AdminStates.DECOMMISSION_INPROGRESS),
+        DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
            AdminStates.DECOMMISSIONED),
+        DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
            AdminStates.NORMAL)
     };
     LocatedBlock lb = new LocatedBlock(
         new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
+    lb.setBlockToken(new Token<BlockTokenIdentifier>(
+        "identifier".getBytes(), "password".getBytes(), new Text("kind"),
+        new Text("service")));
+    return lb;
+  }
+
+  @Test
+  public void testConvertLocatedBlock() {
+    LocatedBlock lb = createLocatedBlock();
     LocatedBlockProto lbProto = PBHelper.convert(lb);
     LocatedBlock lb2 = PBHelper.convert(lbProto);
-    assertEquals(lb.getBlock(), lb2.getBlock());
-    compare(lb.getBlockToken(), lb2.getBlockToken());
-    assertEquals(lb.getStartOffset(), lb2.getStartOffset());
-    assertEquals(lb.isCorrupt(), lb2.isCorrupt());
-    DatanodeInfo [] dnInfos2 = lb2.getLocations();
-    assertEquals(dnInfos.length, dnInfos2.length);
-    for (int i = 0; i < dnInfos.length ; i++) {
-      compare(dnInfos[i], dnInfos2[i]);
+    compare(lb,lb2);
+  }
+
+  @Test
+  public void testConvertLocatedBlockList() {
+    ArrayList<LocatedBlock> lbl = new ArrayList<LocatedBlock>();
+    for (int i=0;i<3;i++) {
+      lbl.add(createLocatedBlock());
+    }
+    List<LocatedBlockProto> lbpl = PBHelper.convertLocatedBlock2(lbl);
+    List<LocatedBlock> lbl2 = PBHelper.convertLocatedBlock(lbpl);
+    assertEquals(lbl.size(), lbl2.size());
+    for (int i=0;i<lbl.size();i++) {
+      compare(lbl.get(i), lbl2.get(i));
     }
   }
 
   @Test
+  public void testConvertLocatedBlockArray() {
+    LocatedBlock [] lbl = new LocatedBlock[3];
+    for (int i=0;i<3;i++) {
+      lbl[i] = createLocatedBlock();
+    }
+    LocatedBlockProto [] lbpl = PBHelper.convertLocatedBlock(lbl);
+    LocatedBlock [] lbl2 = PBHelper.convertLocatedBlock(lbpl);
+    assertEquals(lbl.length, lbl2.length);
+    for (int i=0;i<lbl.length;i++) {
+      compare(lbl[i], lbl2[i]);
+    }
+  }
+
+  @Test
   public void testConvertDatanodeRegistration() {
     DatanodeID dnId = DFSTestUtil.getLocalDatanodeID();
     BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
@@ -471,4 +517,20 @@ public class TestPBHelper {
       }
     }
   }
+
+  @Test
+  public void testChecksumTypeProto() {
+    assertEquals(DataChecksum.Type.NULL,
+        PBHelper.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL));
+    assertEquals(DataChecksum.Type.CRC32,
+        PBHelper.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32));
+    assertEquals(DataChecksum.Type.CRC32C,
+        PBHelper.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C));
+    assertEquals(PBHelper.convert(DataChecksum.Type.NULL),
+        HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL);
+    assertEquals(PBHelper.convert(DataChecksum.Type.CRC32),
+        HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32);
+    assertEquals(PBHelper.convert(DataChecksum.Type.CRC32C),
+        HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C);
+  }
 }

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java Tue Jan 22 19:33:02 2013
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.net.NetworkTopology;
 import org.junit.Test;
+import junit.framework.Assert;
 
 /**
  * This class tests if a balancer schedules tasks correctly.
@@ -174,12 +175,25 @@ public class TestBalancerWithNodeGroup {
     LOG.info("Rebalancing with default factor.");
     waitForBalancer(totalUsedSpace, totalCapacity);
   }
+
+  private void runBalancerCanFinish(Configuration conf,
+      long totalUsedSpace, long totalCapacity) throws Exception {
+    waitForHeartBeat(totalUsedSpace, totalCapacity);
+
+    // start rebalancing
+    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf);
+    Assert.assertTrue(r == Balancer.ReturnStatus.SUCCESS.code ||
+        (r == Balancer.ReturnStatus.NO_MOVE_PROGRESS.code));
+    waitForHeartBeat(totalUsedSpace, totalCapacity);
+    LOG.info("Rebalancing with default factor.");
+  }
 
   /**
    * Create a cluster with even distribution, and a new empty node is added to
    * the cluster, then test rack locality for balancer policy.
    */
-  @Test
+  @Test(timeout=60000)
   public void testBalancerWithRackLocality() throws Exception {
     Configuration conf = createConf();
     long[] capacities = new long[]{CAPACITY, CAPACITY};
@@ -217,7 +231,7 @@ public class TestBalancerWithNodeGroup {
     totalCapacity += newCapacity;
 
     // run balancer and validate results
-    runBalancer(conf, totalUsedSpace, totalCapacity);
+    runBalancerCanFinish(conf, totalUsedSpace, totalCapacity);
 
     DatanodeInfo[] datanodeReport =
         client.getDatanodeReport(DatanodeReportType.ALL);
@@ -245,7 +259,7 @@ public class TestBalancerWithNodeGroup {
    * Create a cluster with even distribution, and a new empty node is added to
    * the cluster, then test node-group locality for balancer policy.
    */
-  @Test
+  @Test(timeout=60000)
   public void testBalancerWithNodeGroup() throws Exception {
     Configuration conf = createConf();
     long[] capacities = new long[]{CAPACITY, CAPACITY, CAPACITY, CAPACITY};
@@ -289,4 +303,49 @@ public class TestBalancerWithNodeGroup {
       cluster.shutdown();
     }
   }
+
+  /**
+   * Create a 4 nodes cluster: 2 nodes (n0, n1) in RACK0/NODEGROUP0, 1 node (n2)
+   * in RACK1/NODEGROUP1 and 1 node (n3) in RACK1/NODEGROUP2. Fill the cluster
+   * to 60% and 3 replicas, so n2 and n3 will have replica for all blocks according
+   * to replica placement policy with NodeGroup. As a result, n2 and n3 will be
+   * filled with 80% (60% x 4 / 3), and no blocks can be migrated from n2 and n3
+   * to n0 or n1 as balancer policy with node group. Thus, we expect the balancer
+   * to end in 5 iterations without move block process.
+   */
+  @Test(timeout=60000)
+  public void testBalancerEndInNoMoveProgress() throws Exception {
+    Configuration conf = createConf();
+    long[] capacities = new long[]{CAPACITY, CAPACITY, CAPACITY, CAPACITY};
+    String[] racks = new String[]{RACK0, RACK0, RACK1, RACK1};
+    String[] nodeGroups = new String[]{NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};
+
+    int numOfDatanodes = capacities.length;
+    assertEquals(numOfDatanodes, racks.length);
+    assertEquals(numOfDatanodes, nodeGroups.length);
+    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(capacities.length)
+        .racks(racks)
+        .simulatedCapacities(capacities);
+    MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
+    cluster = new MiniDFSClusterWithNodeGroup(builder);
+    try {
+      cluster.waitActive();
+      client = NameNodeProxies.createProxy(conf,
+          cluster.getFileSystem(0).getUri(),
+          ClientProtocol.class).getProxy();
+
+      long totalCapacity = TestBalancer.sum(capacities);
+      // fill up the cluster to be 60% full
+      long totalUsedSpace = totalCapacity * 6 / 10;
+      TestBalancer.createFile(cluster, filePath, totalUsedSpace / 3,
+          (short) (3), 0);
+
+      // run balancer which can finish in 5 iterations with no block movement.
+      runBalancerCanFinish(conf, totalUsedSpace, totalCapacity);
+
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java Tue Jan 22 19:33:02 2013
@@ -19,10 +19,13 @@ package org.apache.hadoop.hdfs.server.bl
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map.Entry;
 
@@ -429,4 +432,57 @@ public class TestBlockManager {
     }
     return repls;
   }
+
+  /**
+   * Test that a source node for a highest-priority replication is chosen even if all available
+   * source nodes have reached their replication limits.
+   */
+  @Test
+  public void testHighestPriReplSrcChosenDespiteMaxReplLimit() throws Exception {
+    bm.maxReplicationStreams = 0;
+    bm.replicationStreamsHardLimit = 1;
+
+    long blockId = 42;         // arbitrary
+    Block aBlock = new Block(blockId, 0, 0);
+
+    List<DatanodeDescriptor> origNodes = getNodes(0, 1);
+    // Add the block to the first node.
+    addBlockOnNodes(blockId,origNodes.subList(0,1));
+
+    List<DatanodeDescriptor> cntNodes = new LinkedList<DatanodeDescriptor>();
+    List<DatanodeDescriptor> liveNodes = new LinkedList<DatanodeDescriptor>();
+
+    assertNotNull("Chooses source node for a highest-priority replication"
+        + " even if all available source nodes have reached their replication"
+        + " limits below the hard limit.",
+        bm.chooseSourceDatanode(
+            aBlock,
+            cntNodes,
+            liveNodes,
+            new NumberReplicas(),
+            UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY));
+
+    assertNull("Does not choose a source node for a less-than-highest-priority"
+        + " replication since all available source nodes have reached"
+        + " their replication limits.",
+        bm.chooseSourceDatanode(
+            aBlock,
+            cntNodes,
+            liveNodes,
+            new NumberReplicas(),
+            UnderReplicatedBlocks.QUEUE_VERY_UNDER_REPLICATED));
+
+    // Increase the replication count to test replication count > hard limit
+    DatanodeDescriptor targets[] = { origNodes.get(1) };
+    origNodes.get(0).addBlockToBeReplicated(aBlock, targets);
+
+    assertNull("Does not choose a source node for a highest-priority"
+        + " replication when all available nodes exceed the hard limit.",
+        bm.chooseSourceDatanode(
+            aBlock,
+            cntNodes,
+            liveNodes,
+            new NumberReplicas(),
+            UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY));
+  }
 }

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java Tue Jan 22 19:33:02 2013
@@ -382,6 +382,24 @@ public class TestReplicationPolicy {
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
   }
+
+  /**
+   * In this testcase, it tries to choose more targets than available nodes and
+   * check the result, with stale node avoidance on the write path enabled.
+   * @throws Exception
+   */
+  @Test
+  public void testChooseTargetWithMoreThanAvailableNodesWithStaleness()
+      throws Exception {
+    try {
+      namenode.getNamesystem().getBlockManager().getDatanodeManager()
+          .setAvoidStaleDataNodesForWrite(true);
+      testChooseTargetWithMoreThanAvailableNodes();
+    } finally {
+      namenode.getNamesystem().getBlockManager().getDatanodeManager()
+          .setAvoidStaleDataNodesForWrite(false);
+    }
+  }
 
   /**
    * In this testcase, it tries to choose more targets than available nodes and
@@ -389,7 +407,7 @@ public class TestReplicationPolicy {
    * @throws Exception
    */
   @Test
-  public void testChooseTargetWithMoreThanAvaiableNodes() throws Exception {
+  public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
     // make data node 0 & 1 to be not qualified to choose: not enough disk space
     for(int i=0; i<2; i++) {
       dataNodes[i].updateHeartbeat(

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java Tue Jan 22 19:33:02 2013
@@ -25,6 +25,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.junit.Test;
 
+import com.google.common.base.Charsets;
+
 /**
  * 
@@ -45,7 +47,7 @@ public class TestPathComponents {
     String pathString = str;
     byte[][] oldPathComponents = INode.getPathComponents(pathString);
     byte[][] newPathComponents = 
-        DFSUtil.bytes2byteArray(pathString.getBytes("UTF-8"),
+        DFSUtil.bytes2byteArray(pathString.getBytes(Charsets.UTF_8),
                                 (byte) Path.SEPARATOR_CHAR);
     if (oldPathComponents[0] == null) {
       assertTrue(oldPathComponents[0] == newPathComponents[0]);

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java Tue Jan 22 19:33:02 2013
@@ -1,3 +1,20 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 package org.apache.hadoop.tools;
 
 import java.io.IOException;
@@ -37,4 +54,4 @@ public class FakeRenewer extends TokenRe
     lastRenewed = null;
     lastCanceled = null;
   }
-}
\ No newline at end of file
+}

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer Tue Jan 22 19:33:02 2013
@@ -1 +1,14 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 org.apache.hadoop.tools.FakeRenewer

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/pom.xml?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/pom.xml (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/pom.xml Tue Jan 22 19:33:02 2013
@@ -48,9 +48,6 @@ http://maven.apache.org/xsd/maven-4.0.0.
         <groupId>org.apache.rat</groupId>
         <artifactId>apache-rat-plugin</artifactId>
         <configuration>
-          <includes>
-            <include>pom.xml</include>
-          </includes>
         </configuration>
       </plugin>
     </plugins>
