Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 0688a1c25 -> ba1afb25c
HDFS-11969. Block Storage: Convert unnecessary info log levels to debug. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba1afb25
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba1afb25
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba1afb25

Branch: refs/heads/HDFS-7240
Commit: ba1afb25ccc2fa67e091548bd38afaf07fabab59
Parents: 0688a1c
Author: Anu Engineer <[email protected]>
Authored: Wed Jun 14 19:33:59 2017 -0700
Committer: Anu Engineer <[email protected]>
Committed: Wed Jun 14 19:33:59 2017 -0700

----------------------------------------------------------------------
 .../jscsiHelper/ContainerCacheFlusher.java      |  2 +-
 .../web/storage/DistributedStorageHandler.java  | 22 --------------------
 2 files changed, 1 insertion(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba1afb25/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/ContainerCacheFlusher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/ContainerCacheFlusher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/ContainerCacheFlusher.java
index 19372f4..3998333 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/ContainerCacheFlusher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/ContainerCacheFlusher.java
@@ -402,7 +402,7 @@ public class ContainerCacheFlusher implements Runnable {
       // should be flip instead of rewind, because we also need to make sure
       // the end position is correct.
       blockIDBuffer.flip();
-      LOG.info("Remaining blocks count {} and {}", blockIDBuffer.remaining(),
+      LOG.debug("Remaining blocks count {} and {}", blockIDBuffer.remaining(),
           blockCount);
       while (blockIDBuffer.remaining() >= (Long.SIZE / Byte.SIZE)) {
         long blockID = blockIDBuffer.getLong();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba1afb25/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
index 468f22a..b1b5df3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hdfs.ozone.protocol.proto
     .ContainerProtos.GetKeyResponseProto;
 import org.apache.hadoop.hdfs.ozone.protocol.proto
     .ContainerProtos.KeyData;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset
     .LengthInputStream;
 import org.apache.hadoop.ksm.helpers.KsmBucketArgs;
@@ -45,7 +44,6 @@ import org.apache.hadoop.ozone.web.request.OzoneQuota;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.scm.ScmConfigKeys;
 import org.apache.hadoop.scm.XceiverClientManager;
-import org.apache.hadoop.scm.protocol.LocatedContainer;
 import org.apache.hadoop.scm.protocolPB
     .StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.ozone.web.exceptions.OzoneException;
@@ -73,7 +71,6 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.text.SimpleDateFormat;
 import java.util.Date;
-import java.util.Set;
 import java.util.TimeZone;
 import java.util.Locale;
 import java.util.List;
@@ -469,23 +466,4 @@ public final class DistributedStorageHandler implements StorageHandler {
     sdf.setTimeZone(TimeZone.getTimeZone(OzoneConsts.OZONE_TIME_ZONE));
     return sdf.format(date);
   }
-
-  /**
-   * Translates a set of container locations, ordered such that the first is the
-   * leader, into a corresponding {@link Pipeline} object.
-   *
-   * @param locatedContainer container location
-   * @return pipeline corresponding to container locations
-   */
-  private static Pipeline newPipelineFromLocatedContainer(
-      LocatedContainer locatedContainer) {
-    Set<DatanodeInfo> locations = locatedContainer.getLocations();
-    String leaderId = locations.iterator().next().getDatanodeUuid();
-    Pipeline pipeline = new Pipeline(leaderId);
-    for (DatanodeInfo location : locations) {
-      pipeline.addMember(location);
-    }
-    pipeline.setContainerName(locatedContainer.getContainerName());
-    return pipeline;
-  }
 }

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
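For reference, the LOG.info to LOG.debug switch in ContainerCacheFlusher relies on parameterized logging (the {} placeholders suggest an SLF4J-style logger), where the message is only assembled if the target level is enabled, so per-flush bookkeeping drops out of the INFO log with no formatting cost. A minimal standalone sketch of that pattern; the class name and sample values below are hypothetical stand-ins, not taken from the Hadoop source:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Hypothetical sketch (not part of this commit) of parameterized DEBUG
 * logging: arguments are only formatted into the message when the
 * logger's effective level is DEBUG or lower.
 */
public class DebugLogSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(DebugLogSketch.class);

  public static void main(String[] args) {
    // Stand-ins for blockIDBuffer.remaining() and blockCount in the flusher.
    int remaining = 128;
    int blockCount = 64;

    // Skipped entirely (no string concatenation) when DEBUG is disabled.
    LOG.debug("Remaining blocks count {} and {}", remaining, blockCount);

    // Explicit guard, worthwhile only when computing the arguments
    // themselves is expensive.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Remaining blocks count {} and {}", remaining, blockCount);
    }
  }
}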
