Author: szetszwo
Date: Wed Mar 19 21:05:52 2014
New Revision: 1579397

URL: http://svn.apache.org/r1579397
Log:
svn merge -c 1579396 from trunk for HDFS-6123. Do not log stack trace for
ReplicaAlreadyExistsException and SocketTimeoutException.
Modified:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java

Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1579396

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1579397&r1=1579396&r2=1579397&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Mar 19 21:05:52 2014
@@ -177,6 +177,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-6068. Disallow snapshot names that are also invalid directory names.
     (sathish via szetszwo)
 
+    HDFS-6123. Do not log stack trace for ReplicaAlreadyExistsException and
+    SocketTimeoutException.  (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery

Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1579396

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java?rev=1579397&r1=1579396&r2=1579397&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java Wed Mar 19 21:05:52 2014
@@ -560,7 +560,11 @@ class BlockSender implements java.io.Clo
          * part of a block and then decides not to read the rest (but leaves
          * the socket open).
          */
-        LOG.info("exception: ", e);
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("Failed to send data:", e);
+        } else {
+          LOG.info("Failed to send data: " + e);
+        }
       } else {
         /* Exception while writing to the client. Connection closure from
          * the other end is mostly the case and we do not care much about

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=1579397&r1=1579396&r2=1579397&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Wed Mar 19 21:05:52 2014
@@ -18,12 +18,12 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.ERROR;
+import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.ERROR_ACCESS_TOKEN;
 import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.ERROR_INVALID;
 import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.ERROR_UNSUPPORTED;
-import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.ERROR_ACCESS_TOKEN;
 import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
-import static org.apache.hadoop.util.Time.now;
 import static org.apache.hadoop.hdfs.server.datanode.DataNode.DN_CLIENTTRACE_FORMAT;
+import static org.apache.hadoop.util.Time.now;
 
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
@@ -43,7 +43,6 @@ import java.nio.channels.ClosedChannelEx
 import java.util.Arrays;
 
 import org.apache.commons.logging.Log;
-import org.apache.hadoop.fs.InvalidRequestException;
 import org.apache.hadoop.hdfs.ExtendedBlockId;
 import org.apache.hadoop.hdfs.ShortCircuitShm.SlotId;
 import org.apache.hadoop.hdfs.net.Peer;
@@ -51,9 +50,9 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor.InvalidMagicNumberException;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Receiver;
@@ -231,10 +230,21 @@ class DataXceiver extends Receiver imple
       } while ((peer != null) &&
           (!peer.isClosed() && dnConf.socketKeepaliveTimeout > 0));
     } catch (Throwable t) {
-      LOG.error(datanode.getDisplayName() + ":DataXceiver error processing " +
-          ((op == null) ? "unknown" : op.name()) + " operation " +
-          " src: " + remoteAddress +
-          " dest: " + localAddress, t);
+      String s = datanode.getDisplayName() + ":DataXceiver error processing "
+          + ((op == null) ? "unknown" : op.name()) + " operation "
+          + " src: " + remoteAddress + " dst: " + localAddress;
+      if (op == Op.WRITE_BLOCK && t instanceof ReplicaAlreadyExistsException) {
+        // For WRITE_BLOCK, it is okay if the replica already exists since
+        // client and replication may write the same block to the same datanode
+        // at the same time.
+        if (LOG.isTraceEnabled()) {
+          LOG.trace(s, t);
+        } else {
+          LOG.info(s + "; " + t);
+        }
+      } else {
+        LOG.error(s, t);
+      }
     } finally {
       if (LOG.isDebugEnabled()) {
         LOG.debug(datanode.getDisplayName() + ":Number of active connections is: "