Author: szetszwo
Date: Thu May 14 23:53:12 2009
New Revision: 774966
URL: http://svn.apache.org/viewvc?rev=774966&view=rev
Log:
HADOOP-5822. Fix javac warnings in several dfs tests related to unnecessary
casts. Contributed by Jakob Homan.
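For context, these javac warnings are the redundant-cast kind reported under -Xlint: when an expression already has the target type, the extra cast does nothing and javac flags it. A minimal sketch of the pattern the patch removes (the constant and method below are illustrative placeholders, not code from the patch):

    // Hypothetical constant, already declared as byte.
    static final byte OP_WRITE_BLOCK = 80;

    // DataOutputStream.writeByte(int) accepts an int, so a byte argument is
    // widened implicitly; the explicit (byte) cast is redundant and is what
    // javac -Xlint:cast warns about.
    void send(java.io.DataOutputStream out) throws java.io.IOException {
      out.writeByte((byte) OP_WRITE_BLOCK);  // before: redundant-cast warning
      out.writeByte(OP_WRITE_BLOCK);         // after: cast removed, same byte written
    }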
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFSInputChecker.java
hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java
hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestPread.java
hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java
Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=774966&r1=774965&r2=774966&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Thu May 14 23:53:12 2009
@@ -585,6 +585,9 @@
HADOOP-5820. Fix findbugs warnings for http related codes in hdfs.
(szetszwo)
+ HADOOP-5822. Fix javac warnings in several dfs tests related to unnecessary
+ casts. (Jakob Homan via szetszwo)
+
Release 0.20.1 - Unreleased
INCOMPATIBLE CHANGES
Modified: hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java?rev=774966&r1=774965&r2=774966&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java (original)
+++ hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java Thu May 14 23:53:12 2009
@@ -60,8 +60,6 @@
InetSocketAddress dnAddr;
ByteArrayOutputStream sendBuf = new ByteArrayOutputStream(128);
DataOutputStream sendOut = new DataOutputStream(sendBuf);
- // byte[] recvBuf = new byte[128];
- // ByteBuffer recvByteBuf = ByteBuffer.wrap(recvBuf);
ByteArrayOutputStream recvBuf = new ByteArrayOutputStream(128);
DataOutputStream recvOut = new DataOutputStream(recvBuf);
@@ -170,13 +168,13 @@
// bad ops
sendBuf.reset();
sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
- sendOut.writeByte((byte)(DataTransferProtocol.OP_WRITE_BLOCK-1));
+ sendOut.writeByte(DataTransferProtocol.OP_WRITE_BLOCK - 1);
sendRecvData("Wrong Op Code", true);
/* Test OP_WRITE_BLOCK */
sendBuf.reset();
sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
- sendOut.writeByte((byte)DataTransferProtocol.OP_WRITE_BLOCK);
+ sendOut.writeByte(DataTransferProtocol.OP_WRITE_BLOCK);
sendOut.writeLong(newBlockId); // block id
sendOut.writeLong(0); // generation stamp
sendOut.writeInt(0); // targets in pipeline
@@ -196,7 +194,7 @@
sendBuf.reset();
recvBuf.reset();
sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
- sendOut.writeByte((byte)DataTransferProtocol.OP_WRITE_BLOCK);
+ sendOut.writeByte(DataTransferProtocol.OP_WRITE_BLOCK);
sendOut.writeLong(newBlockId);
sendOut.writeLong(0); // generation stamp
sendOut.writeInt(0); // targets in pipeline
@@ -212,7 +210,7 @@
sendBuf.reset();
recvBuf.reset();
sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
- sendOut.writeByte((byte)DataTransferProtocol.OP_WRITE_BLOCK);
+ sendOut.writeByte(DataTransferProtocol.OP_WRITE_BLOCK);
sendOut.writeLong(++newBlockId);
sendOut.writeLong(0); // generation stamp
sendOut.writeInt(0); // targets in pipeline
@@ -222,7 +220,7 @@
sendOut.writeInt(0);
AccessToken.DUMMY_TOKEN.write(sendOut);
sendOut.writeByte((byte)DataChecksum.CHECKSUM_CRC32);
- sendOut.writeInt((int)512);
+ sendOut.writeInt(512);
sendOut.writeInt(4); // size of packet
sendOut.writeLong(0); // OffsetInBlock
sendOut.writeLong(100); // sequencenumber
@@ -240,7 +238,7 @@
sendBuf.reset();
recvBuf.reset();
sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
- sendOut.writeByte((byte)DataTransferProtocol.OP_WRITE_BLOCK);
+ sendOut.writeByte(DataTransferProtocol.OP_WRITE_BLOCK);
sendOut.writeLong(++newBlockId);
sendOut.writeLong(0); // generation stamp
sendOut.writeInt(0); // targets in pipeline
@@ -250,7 +248,7 @@
sendOut.writeInt(0);
AccessToken.DUMMY_TOKEN.write(sendOut);
sendOut.writeByte((byte)DataChecksum.CHECKSUM_CRC32);
- sendOut.writeInt((int)512); // checksum size
+ sendOut.writeInt(512); // checksum size
sendOut.writeInt(8); // size of packet
sendOut.writeLong(0); // OffsetInBlock
sendOut.writeLong(100); // sequencenumber
@@ -270,7 +268,7 @@
sendBuf.reset();
recvBuf.reset();
sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
- sendOut.writeByte((byte)DataTransferProtocol.OP_READ_BLOCK);
+ sendOut.writeByte(DataTransferProtocol.OP_READ_BLOCK);
newBlockId = firstBlock.getBlockId()-1;
sendOut.writeLong(newBlockId);
sendOut.writeLong(firstBlock.getGenerationStamp());
@@ -284,7 +282,7 @@
// negative block start offset
sendBuf.reset();
sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
- sendOut.writeByte((byte)DataTransferProtocol.OP_READ_BLOCK);
+ sendOut.writeByte(DataTransferProtocol.OP_READ_BLOCK);
sendOut.writeLong(firstBlock.getBlockId());
sendOut.writeLong(firstBlock.getGenerationStamp());
sendOut.writeLong(-1L);
@@ -297,7 +295,7 @@
// bad block start offset
sendBuf.reset();
sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
- sendOut.writeByte((byte)DataTransferProtocol.OP_READ_BLOCK);
+ sendOut.writeByte(DataTransferProtocol.OP_READ_BLOCK);
sendOut.writeLong(firstBlock.getBlockId());
sendOut.writeLong(firstBlock.getGenerationStamp());
sendOut.writeLong(fileLen);
@@ -312,7 +310,7 @@
recvOut.writeShort((short)DataTransferProtocol.OP_STATUS_SUCCESS);
sendBuf.reset();
sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
- sendOut.writeByte((byte)DataTransferProtocol.OP_READ_BLOCK);
+ sendOut.writeByte(DataTransferProtocol.OP_READ_BLOCK);
sendOut.writeLong(firstBlock.getBlockId());
sendOut.writeLong(firstBlock.getGenerationStamp());
sendOut.writeLong(0);
@@ -327,7 +325,7 @@
recvOut.writeShort((short)DataTransferProtocol.OP_STATUS_ERROR);
sendBuf.reset();
sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
- sendOut.writeByte((byte)DataTransferProtocol.OP_READ_BLOCK);
+ sendOut.writeByte(DataTransferProtocol.OP_READ_BLOCK);
sendOut.writeLong(firstBlock.getBlockId());
sendOut.writeLong(firstBlock.getGenerationStamp());
sendOut.writeLong(0);
@@ -340,7 +338,7 @@
//At the end of all this, read the file to make sure that succeeds finally.
sendBuf.reset();
sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
- sendOut.writeByte((byte)DataTransferProtocol.OP_READ_BLOCK);
+ sendOut.writeByte(DataTransferProtocol.OP_READ_BLOCK);
sendOut.writeLong(firstBlock.getBlockId());
sendOut.writeLong(firstBlock.getGenerationStamp());
sendOut.writeLong(0);
Modified: hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFSInputChecker.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFSInputChecker.java?rev=774966&r1=774965&r2=774966&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFSInputChecker.java (original)
+++ hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFSInputChecker.java Thu May 14 23:53:12 2009
@@ -17,9 +17,14 @@
*/
package org.apache.hadoop.hdfs;
-import junit.framework.TestCase;
-import java.io.*;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.RandomAccessFile;
import java.util.Random;
+
+import junit.framework.TestCase;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -50,7 +55,7 @@
// create and write a file that contains three blocks of data
FSDataOutputStream stm = fileSys.create(name, new FsPermission((short)0777),
true, fileSys.getConf().getInt("io.file.buffer.size", 4096),
- (short)NUM_OF_DATANODES, BLOCK_SIZE, null);
+ NUM_OF_DATANODES, BLOCK_SIZE, null);
stm.write(expected);
stm.close();
}
@@ -169,7 +174,7 @@
// test skip to non-checksum-boundary pos
stm.seek(0);
- testSkip1(HALF_CHUNK_SIZE+1);
+ testSkip1(HALF_CHUNK_SIZE + 1);
testSkip1(BYTES_PER_SUM);
testSkip1(HALF_CHUNK_SIZE);
@@ -322,7 +327,7 @@
private void checkSeekAndRead() throws IOException {
int position = 1;
- int len = 2 * BYTES_PER_SUM - (int) position;
+ int len = 2 * BYTES_PER_SUM - position;
readAndCompare(stm, position, len);
position = BYTES_PER_SUM;
Modified: hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java?rev=774966&r1=774965&r2=774966&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java (original)
+++ hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java Thu May 14 23:53:12 2009
@@ -17,18 +17,20 @@
*/
package org.apache.hadoop.hdfs;
-import junit.framework.TestCase;
-import java.io.*;
-import java.net.*;
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
import java.util.List;
+import junit.framework.TestCase;
+
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileUtil.HardLink;
-import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -175,8 +177,8 @@
// Create hard links for a few of the blocks
//
for (int i = 0; i < blocks.size(); i = i + 2) {
- Block b = (Block) blocks.get(i).getBlock();
- FSDataset fsd = (FSDataset) dataset;
+ Block b = blocks.get(i).getBlock();
+ FSDataset fsd = dataset;
File f = fsd.getFile(b);
File link = new File(f.toString() + ".link");
System.out.println("Creating hardlink for File " + f +
@@ -188,7 +190,7 @@
// Detach all blocks. This should remove hardlinks (if any)
//
for (int i = 0; i < blocks.size(); i++) {
- Block b = (Block) blocks.get(i).getBlock();
+ Block b = blocks.get(i).getBlock();
System.out.println("testCopyOnWrite detaching block " + b);
assertTrue("Detaching block " + b + " should have returned true",
dataset.detachBlock(b, 1) == true);
@@ -198,7 +200,7 @@
// return false
//
for (int i = 0; i < blocks.size(); i++) {
- Block b = (Block) blocks.get(i).getBlock();
+ Block b = blocks.get(i).getBlock();
System.out.println("testCopyOnWrite detaching block " + b);
assertTrue("Detaching block " + b + " should have returned false",
dataset.detachBlock(b, 1) == false);
Modified: hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java?rev=774966&r1=774965&r2=774966&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java (original)
+++ hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java Thu May 14 23:53:12 2009
@@ -21,13 +21,13 @@
import java.io.IOException;
import junit.framework.TestCase;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode;
-import org.apache.hadoop.ipc.RPC;
/**
* This test checks correctness of port usage by hdfs components:
Modified: hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestPread.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestPread.java?rev=774966&r1=774965&r2=774966&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestPread.java (original)
+++ hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestPread.java Thu May 14 23:53:12 2009
@@ -17,9 +17,12 @@
*/
package org.apache.hadoop.hdfs;
-import junit.framework.TestCase;
-import java.io.*;
+import java.io.DataOutputStream;
+import java.io.IOException;
import java.util.Random;
+
+import junit.framework.TestCase;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
@@ -42,7 +45,7 @@
// test empty file open and read
stm.close();
FSDataInputStream in = fileSys.open(name);
- byte[] buffer = new byte[(int)(12*blockSize)];
+ byte[] buffer = new byte[12 * blockSize];
in.readFully(0, buffer, 0, 0);
IOException res = null;
try { // read beyond the end of the file
@@ -85,7 +88,7 @@
private void pReadFile(FileSystem fileSys, Path name) throws IOException {
FSDataInputStream stm = fileSys.open(name);
- byte[] expected = new byte[(int)(12*blockSize)];
+ byte[] expected = new byte[12 * blockSize];
if (simulatedStorage) {
for (int i= 0; i < expected.length; i++) {
expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
@@ -109,17 +112,17 @@
// Now see if we can cross a single block boundary successfully
// read 4K bytes from blockSize - 2K offset
stm.readFully(blockSize - 2048, actual, 0, 4096);
- checkAndEraseData(actual, (int)(blockSize-2048), expected, "Pread Test 3");
+ checkAndEraseData(actual, (blockSize - 2048), expected, "Pread Test 3");
// now see if we can cross two block boundaries successfully
// read blockSize + 4K bytes from blockSize - 2K offset
- actual = new byte[(int)(blockSize+4096)];
+ actual = new byte[blockSize + 4096];
stm.readFully(blockSize - 2048, actual);
- checkAndEraseData(actual, (int)(blockSize-2048), expected, "Pread Test 4");
+ checkAndEraseData(actual, (blockSize - 2048), expected, "Pread Test 4");
// now see if we can cross two block boundaries that are not cached
// read blockSize + 4K bytes from 10*blockSize - 2K offset
- actual = new byte[(int)(blockSize+4096)];
- stm.readFully(10*blockSize - 2048, actual);
- checkAndEraseData(actual, (int)(10*blockSize-2048), expected, "Pread Test 5");
+ actual = new byte[blockSize + 4096];
+ stm.readFully(10 * blockSize - 2048, actual);
+ checkAndEraseData(actual, (10 * blockSize - 2048), expected, "Pread Test 5");
// now check that even after all these preads, we can still read
// bytes 8K-12K
actual = new byte[4096];
Modified: hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java?rev=774966&r1=774965&r2=774966&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java (original)
+++ hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java Thu May 14 23:53:12 2009
@@ -36,7 +36,7 @@
Block block = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
// keep a copy of all datanode descriptor
- DatanodeDescriptor[] datanodes = (DatanodeDescriptor[])
+ DatanodeDescriptor[] datanodes =
namesystem.heartbeats.toArray(new DatanodeDescriptor[REPLICATION_FACTOR]);
// start two new nodes