Author: arp
Date: Mon Oct 21 17:11:34 2013
New Revision: 1534279

URL: http://svn.apache.org/r1534279
Log:
Merging r1533208 through r1534278 from trunk to branch HDFS-2832
Added:
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java
      - copied unchanged from r1534278, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestTools.java
      - copied unchanged from r1534278, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestTools.java

Modified:
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/JMXGet.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/native/util/posix_util.c
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java

Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1533208-1534278

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1534279&r1=1534278&r2=1534279&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Mon Oct 21 17:11:34 2013
@@ -324,6 +324,11 @@ Release 2.3.0 - UNRELEASED
     HDFS-5130. Add test for snapshot related FsShell and DFSAdmin commands.
     (Binglin Chang via jing9)
 
+    HDFS-5374. Remove deadcode in DFSOutputStream. (suresh)
+
+    HDFS-4511. Cover package org.apache.hadoop.hdfs.tools with unit test
+    (Andrey Klochkov via jeagles)
+
   OPTIMIZATIONS
 
     HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)
@@ -375,6 +380,9 @@ Release 2.2.1 - UNRELEASED
     HDFS-5360. Improvement of usage message of renameSnapshot and
     deleteSnapshot. (Shinichi Yamashita via wang)
 
+    HDFS-5331. make SnapshotDiff.java to a o.a.h.util.Tool interface implementation.
+    (Vinayakumar B via umamahesh)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -410,6 +418,8 @@ Release 2.2.1 - UNRELEASED
     HDFS-5370. Typo in Error Message: different between range in condition
     and range in error message. (Kousuke Saruta via suresh)
 
+    HDFS-5365. Fix libhdfs compile error on FreeBSD9. (Radim Kolar via cnauroth)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml?rev=1534279&r1=1534278&r2=1534279&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml Mon Oct 21 17:11:34 2013
@@ -83,7 +83,7 @@
       <Class name="org.apache.hadoop.mapred.Task$TaskReporter" />
       <Method name="run" />
       <Bug pattern="DM_EXIT" />
-     </Match>
+    </Match>
     <!--
       We need to cast objects between old and new api objects
     -->
@@ -325,6 +325,12 @@
       <Field name="modification" />
       <Bug pattern="VO_VOLATILE_INCREMENT" />
     </Match>
+    <!-- Replace System.exit() call with ExitUtil.terminate() -->
+    <Match>
+      <Class name="org.apache.hadoop.hdfs.tools.JMXGet"/>
+      <Method name="main" />
+      <Bug pattern="NP_NULL_ON_SOME_PATH" />
+    </Match>
     <Match>
       <Class name="org.apache.hadoop.hdfs.server.datanode.ReplicaInfo" />
       <Method name="setDirInternal" />

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1534279&r1=1534278&r2=1534279&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml Mon Oct 21 17:11:34 2013
@@ -176,6 +176,11 @@ http://maven.apache.org/xsd/maven-4.0.0.
       <artifactId>xmlenc</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
 <build>
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt?rev=1534279&r1=1534278&r2=1534279&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt Mon Oct 21 17:11:34 2013
@@ -62,6 +62,11 @@ endfunction()
 INCLUDE(CheckCSourceCompiles)
 CHECK_C_SOURCE_COMPILES("int main(void) { static __thread int i = 0; return 0; }" HAVE_BETTER_TLS)
 
+# Check if we need to link dl library to get dlopen.
+# dlopen on Linux is in separate library but on FreeBSD its in libc
+INCLUDE(CheckLibraryExists)
+CHECK_LIBRARY_EXISTS(dl dlopen "" NEED_LINK_DL)
+
 find_package(JNI REQUIRED)
 if (NOT GENERATED_JAVAH)
     # Must identify where the generated headers have been placed
@@ -89,9 +94,13 @@ add_dual_library(hdfs
     main/native/libhdfs/jni_helper.c
     main/native/libhdfs/hdfs.c
 )
+if (NEED_LINK_DL)
+    set(LIB_DL dl)
+endif(NEED_LINK_DL)
+
 target_link_dual_libraries(hdfs
     ${JAVA_JVM_LIBRARY}
-    dl
+    ${LIB_DL}
     pthread
 )
 dual_output_directory(hdfs target/usr/local/lib)
Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1533208-1534278

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1534279&r1=1534278&r2=1534279&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java Mon Oct 21 17:11:34 2013
@@ -47,7 +47,6 @@ import org.apache.hadoop.fs.FSOutputSumm
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Syncable;
-import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
@@ -138,7 +137,7 @@ public class DFSOutputStream extends FSO
   private long currentSeqno = 0;
   private long lastQueuedSeqno = -1;
   private long lastAckedSeqno = -1;
-  private long bytesCurBlock = 0; // bytes writen in current block
+  private long bytesCurBlock = 0; // bytes written in current block
   private int packetSize = 0; // write packet size, not including the header.
   private int chunksPerPacket = 0;
   private final AtomicReference<IOException> lastException = new AtomicReference<IOException>();
@@ -460,8 +459,7 @@ public class DFSOutputStream extends FSO
           }
         }
 
-        Packet one = null;
-
+        Packet one;
         try {
           // process datanode IO errors if any
           boolean doSleep = false;
@@ -506,7 +504,7 @@ public class DFSOutputStream extends FSO
             if(DFSClient.LOG.isDebugEnabled()) {
               DFSClient.LOG.debug("Allocating new block");
             }
-            nodes = nextBlockOutputStream(src);
+            nodes = nextBlockOutputStream();
             initDataStreaming();
           } else if (stage == BlockConstructionStage.PIPELINE_SETUP_APPEND) {
             if(DFSClient.LOG.isDebugEnabled()) {
@@ -571,9 +569,6 @@ public class DFSOutputStream extends FSO
           }
           lastPacket = Time.now();
 
-          if (one.isHeartbeatPacket()) {  //heartbeat packet
-          }
-
           // update bytesSent
           long tmpBytesSent = one.getLastByteOffsetBlock();
           if (bytesSent < tmpBytesSent) {
@@ -692,7 +687,7 @@ public class DFSOutputStream extends FSO
   }
 
   //
-  // Processes reponses from the datanodes.  A packet is removed
+  // Processes responses from the datanodes.  A packet is removed
   // from the ackQueue when its response arrives.
   //
   private class ResponseProcessor extends Daemon {
@@ -734,18 +729,18 @@ public class DFSOutputStream extends FSO
           }
 
           assert seqno != PipelineAck.UNKOWN_SEQNO :
-            "Ack for unkown seqno should be a failed ack: " + ack;
+            "Ack for unknown seqno should be a failed ack: " + ack;
           if (seqno == Packet.HEART_BEAT_SEQNO) {  // a heartbeat ack
             continue;
           }
 
           // a success ack for a data packet
-          Packet one = null;
+          Packet one;
           synchronized (dataQueue) {
             one = ackQueue.getFirst();
           }
           if (one.seqno != seqno) {
-            throw new IOException("Responseprocessor: Expecting seqno " +
+            throw new IOException("ResponseProcessor: Expecting seqno " +
                                   " for block " + block +
                                   one.seqno + " but received " + seqno);
           }
@@ -1056,7 +1051,7 @@ public class DFSOutputStream extends FSO
    * Must get block ID and the IDs of the destinations from the namenode.
    * Returns the list of target datanodes.
    */
-  private DatanodeInfo[] nextBlockOutputStream(String client) throws IOException {
+  private DatanodeInfo[] nextBlockOutputStream() throws IOException {
     LocatedBlock lb = null;
     DatanodeInfo[] nodes = null;
     int count = dfsClient.getConf().nBlockWriteRetry;
@@ -1214,8 +1209,7 @@ public class DFSOutputStream extends FSO
   }
 
   private LocatedBlock locateFollowingBlock(long start,
-      DatanodeInfo[] excludedNodes)
-      throws IOException, UnresolvedLinkException {
+      DatanodeInfo[] excludedNodes)  throws IOException {
     int retries = dfsClient.getConf().nBlockWriteLocateFollowingRetry;
     long sleeptime = 400;
     while (true) {
@@ -1287,7 +1281,7 @@ public class DFSOutputStream extends FSO
    * Create a socket for a write pipeline
    * @param first the first datanode
    * @param length the pipeline length
-   * @param client
+   * @param client client
    * @return the socket connected to the first datanode
    */
   static Socket createSocketForPipeline(final DatanodeInfo first,
@@ -1479,7 +1473,7 @@ public class DFSOutputStream extends FSO
           //
           // Rather than wait around for space in the queue, we should instead try to
           // return to the caller as soon as possible, even though we slightly overrun
-          // the MAX_PACKETS iength.
+          // the MAX_PACKETS length.
           Thread.currentThread().interrupt();
           break;
         }
@@ -1700,7 +1694,7 @@ public class DFSOutputStream extends FSO
         }
       }
       // If 1) any new blocks were allocated since the last flush, or 2) to
-      // update length in NN is requried, then persist block locations on
+      // update length in NN is required, then persist block locations on
       // namenode.
       if (persistBlocks.getAndSet(false) || updateLength) {
         try {
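The ResponseProcessor hunks above rely on an ordering invariant: datanode acks must arrive in exactly the order packets were queued, so each ack is matched by sequence number against the head of the ack queue. A toy Java sketch of that invariant (not HDFS code; the class and method names below are hypothetical):

import java.util.ArrayDeque;
import java.util.Deque;

public class AckQueueSketch {
  static final class Packet {
    final long seqno;
    Packet(long seqno) { this.seqno = seqno; }
  }

  private final Deque<Packet> ackQueue = new ArrayDeque<Packet>();

  void sent(Packet p) {
    ackQueue.addLast(p); // packet is in flight, awaiting its ack
  }

  void acked(long seqno) {
    Packet head = ackQueue.peekFirst();
    if (head == null || head.seqno != seqno) {
      // same shape as the IOException thrown in DFSOutputStream
      throw new IllegalStateException("Expecting seqno "
          + (head == null ? "none" : head.seqno) + " but received " + seqno);
    }
    ackQueue.removeFirst(); // ack matched; the packet is done
  }

  public static void main(String[] args) {
    AckQueueSketch q = new AckQueueSketch();
    q.sent(new Packet(0));
    q.sent(new Packet(1));
    q.acked(0);
    q.acked(1);
    System.out.println("all acks matched in order");
  }
}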
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java?rev=1534279&r1=1534278&r2=1534279&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java Mon Oct 21 17:11:34 2013
@@ -53,6 +53,7 @@ import org.apache.hadoop.security.Creden
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.GenericOptionsParser;
 
 import com.google.common.base.Charsets;
@@ -86,7 +87,7 @@ public class DelegationTokenFetcher {
     err.println("  --print             Print the delegation token");
     err.println();
     GenericOptionsParser.printGenericCommandUsage(err);
-    System.exit(1);
+    ExitUtil.terminate(1);
   }
 
   private static Collection<Token<?>> readTokens(Path file, Configuration conf)

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/JMXGet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/JMXGet.java?rev=1534279&r1=1534278&r2=1534279&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/JMXGet.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/JMXGet.java Mon Oct 21 17:11:34 2013
@@ -43,6 +43,7 @@ import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.util.ExitUtil;
 
 /**
  * tool to get data from NameNode or DataNode using MBeans currently the
@@ -295,7 +296,7 @@ public class JMXGet {
       // invalid arguments
       err("Invalid args");
       printUsage(opts);
-      System.exit(-1);
+      ExitUtil.terminate(-1);
     }
 
     JMXGet jm = new JMXGet();
@@ -317,7 +318,7 @@ public class JMXGet {
 
     if (commandLine.hasOption("help")) {
       printUsage(opts);
-      System.exit(0);
+      ExitUtil.terminate(0);
     }
 
     // rest of args
@@ -342,6 +343,6 @@ public class JMXGet {
       res = -1;
     }
 
-    System.exit(res);
+    ExitUtil.terminate(res);
   }
 }
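The DelegationTokenFetcher and JMXGet hunks above replace System.exit() with ExitUtil.terminate() so the new HDFS-4511 tests can drive main() without killing the test JVM. A minimal sketch of how that pattern is typically exercised, assuming the standard org.apache.hadoop.util.ExitUtil API (disableSystemExit() and an ExitException carrying a public status field); the class below is hypothetical:

import org.apache.hadoop.util.ExitUtil;

public class ExitUtilSketch {
  static void usageAndExit() {
    System.err.println("usage: tool <args>");
    ExitUtil.terminate(1); // throws ExitException once system exit is disabled
  }

  public static void main(String[] args) {
    ExitUtil.disableSystemExit(); // what a unit test would call first
    try {
      usageAndExit();
    } catch (ExitUtil.ExitException e) {
      // instead of the JVM dying, the test can assert on the exit status
      System.out.println("tool asked to exit with status " + e.status);
    }
  }
}

This is also why the findbugsExcludeFile.xml hunk adds an exclusion for JMXGet.main: the terminate() call changes the control flow FindBugs sees.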
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java?rev=1534279&r1=1534278&r2=1534279&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java Mon Oct 21 17:11:34 2013
@@ -20,12 +20,14 @@ package org.apache.hadoop.hdfs.tools.sna
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
 
 /**
  * A tool used to get the difference report between two snapshots, or between
@@ -38,7 +40,7 @@ import org.apache.hadoop.hdfs.protocol.S
  * </pre>
  */
 @InterfaceAudience.Private
-public class SnapshotDiff {
+public class SnapshotDiff extends Configured implements Tool {
   private static String getSnapshotName(String name) {
     if (Path.CUR_DIR.equals(name)) { // current directory
       return "";
@@ -57,7 +59,8 @@ public class SnapshotDiff {
     return name.substring(i + HdfsConstants.DOT_SNAPSHOT_DIR.length() + 1);
   }
 
-  public static void main(String[] argv) throws IOException {
+  @Override
+  public int run(String[] argv) throws Exception {
     String description = "SnapshotDiff <snapshotDir> <from> <to>:\n" +
     "\tGet the difference between two snapshots, \n" +
     "\tor between a snapshot and the current tree of a directory.\n" +
@@ -67,15 +70,14 @@ public class SnapshotDiff {
 
     if(argv.length != 3) {
       System.err.println("Usage: \n" + description);
-      System.exit(1);
+      return 1;
     }
 
-    Configuration conf = new Configuration();
-    FileSystem fs = FileSystem.get(conf);
+    FileSystem fs = FileSystem.get(getConf());
     if (! (fs instanceof DistributedFileSystem)) {
       System.err.println(
           "SnapshotDiff can only be used in DistributedFileSystem");
-      System.exit(1);
+      return 1;
     }
     DistributedFileSystem dfs = (DistributedFileSystem) fs;
 
@@ -89,7 +91,14 @@ public class SnapshotDiff {
     } catch (IOException e) {
       String[] content = e.getLocalizedMessage().split("\n");
       System.err.println("snapshotDiff: " + content[0]);
+      return 1;
     }
+    return 0;
+  }
 
+  public static void main(String[] argv) throws Exception {
+    int rc = ToolRunner.run(new SnapshotDiff(), argv);
+    System.exit(rc);
   }
 }
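SnapshotDiff now follows Hadoop's standard Tool/ToolRunner pattern (HDFS-5331): ToolRunner parses the generic options (-D, -conf, -fs, ...) and hands the tool a populated Configuration via getConf(), while run()'s return value becomes the process exit code. A minimal sketch of the same pattern with a hypothetical tool:

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class EchoTool extends Configured implements Tool {
  @Override
  public int run(String[] args) throws Exception {
    // getConf() is populated by ToolRunner before run() is invoked
    System.out.println("fs.defaultFS = " + getConf().get("fs.defaultFS"));
    for (String a : args) {
      System.out.println("arg: " + a);
    }
    return 0; // becomes the process exit code via main()
  }

  public static void main(String[] args) throws Exception {
    System.exit(ToolRunner.run(new EchoTool(), args));
  }
}

The same structure is what makes the class testable: a test can call new SnapshotDiff().run(args) directly and assert on the returned status instead of trapping System.exit().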
Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1527684-1534278

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/native/util/posix_util.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/native/util/posix_util.c?rev=1534279&r1=1534278&r2=1534279&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/native/util/posix_util.c (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/native/util/posix_util.c Mon Oct 21 17:11:34 2013
@@ -27,6 +27,7 @@
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <unistd.h>
+#include <limits.h>
 
 static pthread_mutex_t gTempdirLock = PTHREAD_MUTEX_INITIALIZER;
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java?rev=1534279&r1=1534278&r2=1534279&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java Mon Oct 21 17:11:34 2013
@@ -21,9 +21,13 @@ package org.apache.hadoop.tools;
 import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
+import java.io.PipedInputStream;
+import java.io.PipedOutputStream;
+import java.io.PrintStream;
 import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
@@ -92,6 +96,7 @@ public class TestJMXGet {
     //jmx.init();
     //jmx = new JMXGet();
     jmx.init(); // default lists namenode mbeans only
+    assertTrue("error printAllValues", checkPrintAllValues(jmx));
 
     //get some data from different source
     assertEquals(numDatanodes, Integer.parseInt(
@@ -103,7 +108,24 @@ public class TestJMXGet {
 
     cluster.shutdown();
   }
-  
+
+  private static boolean checkPrintAllValues(JMXGet jmx) throws Exception {
+    int size = 0;
+    byte[] bytes = null;
+    String pattern = "List of all the available keys:";
+    PipedOutputStream pipeOut = new PipedOutputStream();
+    PipedInputStream pipeIn = new PipedInputStream(pipeOut);
+    System.setErr(new PrintStream(pipeOut));
+    jmx.printAllValues();
+    if ((size = pipeIn.available()) != 0) {
+      bytes = new byte[size];
+      pipeIn.read(bytes, 0, bytes.length);
+    }
+    pipeOut.close();
+    pipeIn.close();
+    return bytes != null ? new String(bytes).contains(pattern) : false;
+  }
+
   /**
    * test JMX connection to DataNode..
   * @throws Exception
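The new checkPrintAllValues helper verifies printAllValues() output by redirecting System.err into a pipe. A simpler sketch of the same capture technique using only the JDK (the class and method names below are hypothetical); ByteArrayOutputStream avoids the pipe's fixed buffer, and the finally block restores System.err, which the committed helper leaves redirected:

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

public class StderrCaptureSketch {
  // Runs action with System.err redirected, restores the original stream,
  // and reports whether the captured output contains pattern.
  static boolean stderrContains(Runnable action, String pattern) {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    PrintStream original = System.err;
    System.setErr(new PrintStream(buf));
    try {
      action.run(); // whatever writes to System.err
    } finally {
      System.setErr(original); // always restore the real stream
    }
    return buf.toString().contains(pattern);
  }

  public static void main(String[] args) {
    boolean ok = stderrContains(new Runnable() {
      @Override
      public void run() {
        System.err.println("List of all the available keys: foo");
      }
    }, "List of all the available keys:");
    System.out.println("captured? " + ok);
  }
}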