Repository: hadoop
Updated Branches:
  refs/heads/trunk e4f22b08e -> fac9f91b2
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java
index 5e708be..a195bf1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java
@@ -47,7 +47,7 @@ public enum Quota {
 
   /**
    * Is quota violated?
-   * The quota is violated if quota is set and usage > quota.
+   * The quota is violated if quota is set and usage &gt; quota.
    */
   public static boolean isViolated(final long quota, final long usage) {
     return quota >= 0 && usage > quota;
@@ -55,7 +55,8 @@ public enum Quota {
 
   /**
    * Is quota violated?
-   * The quota is violated if quota is set, delta > 0 and usage + delta > quota.
+   * The quota is violated if quota is set, delta &gt; 0 and
+   * usage + delta &gt; quota.
    */
   static boolean isViolated(final long quota, final long usage,
       final long delta) {
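
For context, the rule documented above is exactly what the method bodies
implement, with a negative quota meaning "not set". A standalone sketch
(illustrative class name; the first method body is the one visible in the
hunk, the second follows the javadoc's description of the delta variant):

  final class QuotaCheck {
    // Quota counts as "set" only when non-negative (-1 conventionally: unset).
    static boolean isViolated(long quota, long usage) {
      return quota >= 0 && usage > quota;
    }

    // Only a positive delta can newly violate a set quota.
    static boolean isViolated(long quota, long usage, long delta) {
      return quota >= 0 && delta > 0 && usage + delta > quota;
    }
  }

  // Example: isViolated(100, 90, 20) == true; isViolated(-1, 90, 20) == false.
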
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
index a8acccd..2e13df5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
@@ -319,7 +319,7 @@ public class ReencryptionHandler implements Runnable {
   /**
    * Main loop. It takes at most 1 zone per scan, and executes until the zone
    * is completed.
-   * {@see #reencryptEncryptionZoneInt(Long)}.
+   * {@link #reencryptEncryptionZone(long)}.
    */
   @Override
   public void run() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
index e1bf027..b6f4f64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
@@ -31,7 +31,7 @@ import com.google.common.base.Preconditions;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
 
 /**
- * There are four types of extended attributes <XAttr> defined by the
+ * There are four types of extended attributes &lt;XAttr&gt; defined by the
  * following namespaces:
  * <br>
  * USER - extended user attributes: these can be assigned to files and
@@ -56,7 +56,7 @@ import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_
  * is called on a file or directory in the /.reserved/raw HDFS directory
  * hierarchy. These attributes can only be accessed by the user who have
  * read access.
- * </br>
+ * <br>
 */
 @InterfaceAudience.Private
 public class XAttrPermissionFilter {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
index 1dab69c..d856f6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
@@ -33,7 +33,7 @@ public class XAttrStorage {
 
   /**
    * Reads the extended attribute of an inode by name with prefix.
-   * <p/>
+   * <p>
    *
    * @param inode INode to read
    * @param snapshotId the snapshotId of the requested path
@@ -48,11 +48,11 @@ public class XAttrStorage {
 
   /**
    * Reads the existing extended attributes of an inode.
-   * <p/>
+   * <p>
    * Must be called while holding the FSDirectory read lock.
    *
    * @param inodeAttr INodeAttributes to read.
-   * @return List<XAttr> <code>XAttr</code> list.
+   * @return {@code XAttr} list.
    */
   public static List<XAttr> readINodeXAttrs(INodeAttributes inodeAttr) {
     XAttrFeature f = inodeAttr.getXAttrFeature();
@@ -61,7 +61,7 @@ public class XAttrStorage {
 
   /**
    * Update xattrs of inode.
-   * <p/>
+   * <p>
    * Must be called while holding the FSDirectory write lock.
    *
    * @param inode INode to update
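
A rough paraphrase of the namespace rules the XAttrPermissionFilter comment
enumerates (hypothetical enum and helper, illustrative only; the real filter
also consults file permissions and caller context, and handles the
security.hdfs.unreadable.by.superuser attribute specially, per the
SECURITY_XATTR_UNREADABLE_BY_SUPERUSER import visible above):

  enum XAttrNamespace { USER, TRUSTED, SYSTEM, SECURITY }

  final class XAttrAccessSketch {
    static boolean mayAccess(XAttrNamespace ns, boolean isSuperUser) {
      switch (ns) {
      case USER:    return true;          // still subject to permission checks
      case TRUSTED: return isSuperUser;   // superuser-only namespace
      default:      return false;         // SYSTEM/SECURITY: HDFS-internal use
      }
    }
  }
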
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
index d115656..1ba59a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
@@ -157,10 +157,10 @@ abstract class AbstractINodeDiffList<N extends INode,
 
   /**
    * Find the latest snapshot before a given snapshot.
-   * @param anchorId The returned snapshot's id must be <= or < this given
-   *                 snapshot id.
-   * @param exclusive True means the returned snapshot's id must be < the given
-   *                  id, otherwise <=.
+   * @param anchorId The returned snapshot's id must be &lt;= or &lt; this
+   *                 given snapshot id.
+   * @param exclusive True means the returned snapshot's id must be &lt; the
+   *                  given id, otherwise &lt;=.
    * @return The id of the latest snapshot before the given snapshot.
    */
   public final int getPrior(int anchorId, boolean exclusive) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListBySkipList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListBySkipList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListBySkipList.java
index 85d9a6d..705b8d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListBySkipList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListBySkipList.java
@@ -44,15 +44,15 @@ import java.util.Objects;
  * and created after a particular snapshot and before the next snapshot. The
  * sequence will look like this:
  * <p>
- * s0->s1->s2->s3->s4->s5->s6->s7->s8->s9.
+ * {@literal s0->s1->s2->s3->s4->s5->s6->s7->s8->s9}.
  * <p>
 * Assuming a skip interval of 3, which means a new diff will be added at a
 * level higher than the current level after we have ore than 3 snapshots.
 * Next level promotion happens after 9 snapshots and so on.
 * <p>
- * level 2: s08------------------------------->s9
- * level 1: S02------->s35-------->s68-------->s9
- * level 0: s0->s1->s2->s3->s4->s5->s6->s7->s8->s9
+ * level 2: {@literal s08------------------------------->s9}
+ * level 1: {@literal S02------->s35-------->s68-------->s9}
+ * level 0: {@literal s0->s1->s2->s3->s4->s5->s6->s7->s8->s9}
 * <p>
 * s02 will be created by combining diffs for s0, s1, s2 once s3 gets created.
 * Similarly, s08 will be created by combining s02, s35 and s68 once s9 gets
@@ -143,6 +143,7 @@ public class DiffListBySkipList implements DiffList<DirectoryDiff> {
    * and level.
    *
    * @param diff The element to be stored in the node.
+   * @param level
    */
   SkipListNode(DirectoryDiff diff, int level) {
     this.diff = diff;
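
To make the skip-list diagram above concrete: with a skip interval of 3, a
higher-level combined diff appears whenever the snapshot index is a
power-multiple of the interval. A small illustrative helper (not the actual
DiffListBySkipList code):

  final class SkipLevels {
    // Level occupied by the n-th snapshot diff for a given skip interval:
    // promotions happen at 3, 9, 27, ... for interval 3, matching the
    // s02/s08 picture (levelOf(3, 3) == 1, levelOf(9, 3) == 2).
    static int levelOf(int n, int skipInterval) {
      int level = 0;
      while (n > 0 && n % skipInterval == 0) {
        level++;
        n /= skipInterval;
      }
      return level;
    }
  }
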
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
index 02b9cff..b3f8de9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
@@ -106,8 +106,8 @@ public class BlockStorageMovementNeeded {
    * Add the itemInfo to tracking list for which storage movement expected if
    * necessary.
    *
-   * @param itemInfoList
-   *          - List of child in the directory
+   * @param itemInfo
+   *          - child in the directory
    * @param scanCompleted
    *          -Indicates whether the ItemInfo start id directory has no more
    *          elements to scan.
@@ -191,7 +191,6 @@ public class BlockStorageMovementNeeded {
   /**
    * Clean all the movements in spsDirsToBeTraveresed/storageMovementNeeded
    * and notify to clean up required resources.
-   * @throws IOException
    */
   public synchronized void clearQueuesWithNotification() {
     // Remove xAttr from directories

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/DatanodeCacheManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/DatanodeCacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/DatanodeCacheManager.java
index d4e514b..be8d01f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/DatanodeCacheManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/DatanodeCacheManager.java
@@ -69,7 +69,7 @@ public class DatanodeCacheManager {
 
   /**
    * Returns the live datanodes and its storage details, which has available
-   * space (> 0) to schedule block moves. This will return array of datanodes
+   * space (&gt; 0) to schedule block moves. This will return array of datanodes
    * from its local cache. It has a configurable refresh interval in millis and
    * periodically refresh the datanode cache by fetching latest
    * {@link Context#getLiveDatanodeStorageReport()} once it elapsed refresh
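
The refresh-on-read behavior described in the DatanodeCacheManager hunk can be
pictured like this (a sketch with hypothetical field names, not the class's
actual body; a Supplier stands in for the Context call named in the javadoc):

  import java.util.function.Supplier;

  final class CachedReports {
    private Object[] cachedReports;   // stand-in for DatanodeStorageReport[]
    private long lastRefreshTimeMs;
    private final long refreshIntervalMs;

    CachedReports(long refreshIntervalMs) {
      this.refreshIntervalMs = refreshIntervalMs;
    }

    Object[] get(Supplier<Object[]> fetcher) {
      long now = System.currentTimeMillis();
      if (cachedReports == null
          || now - lastRefreshTimeMs >= refreshIntervalMs) {
        // Fetch a fresh datanode storage report only once the configured
        // refresh interval has elapsed; otherwise serve the local cache.
        cachedReports = fetcher.get();
        lastRefreshTimeMs = now;
      }
      return cachedReports;
    }
  }
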
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java
index 074eab6..14cf05d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java
@@ -39,7 +39,7 @@ import com.google.common.annotations.VisibleForTesting;
 * configured by the administrator.
 *
 * <p>
- * If the configured mode is {@link StoragePolicySatisfierMode.EXTERNAL}, then
+ * If the configured mode is {@link StoragePolicySatisfierMode#EXTERNAL}, then
 * it won't do anything, just maintains the sps invoked path ids. Administrator
 * requires to start external sps service explicitly, to fetch the sps invoked
 * path ids from namenode, then do necessary computations and block movement in
@@ -48,7 +48,7 @@ import com.google.common.annotations.VisibleForTesting;
 * external sps service functionality.
 *
 * <p>
- * If the configured mode is {@link StoragePolicySatisfierMode.NONE}, then it
+ * If the configured mode is {@link StoragePolicySatisfierMode#NONE}, then it
 * will disable the sps feature completely by clearing all queued up sps path's
 * hint.
 *
@@ -88,12 +88,12 @@ public class StoragePolicySatisfyManager {
    * This function will do following logic based on the configured sps mode:
    *
    * <p>
-   * If the configured mode is {@link StoragePolicySatisfierMode.EXTERNAL}, then
+   * If the configured mode is {@link StoragePolicySatisfierMode#EXTERNAL}, then
    * it won't do anything. Administrator requires to start external sps service
    * explicitly.
    *
    * <p>
-   * If the configured mode is {@link StoragePolicySatisfierMode.NONE}, then the
+   * If the configured mode is {@link StoragePolicySatisfierMode#NONE}, then the
    * service is disabled and won't do any action.
    */
   public void start() {
@@ -121,12 +121,12 @@ public class StoragePolicySatisfyManager {
    * This function will do following logic based on the configured sps mode:
    *
    * <p>
-   * If the configured mode is {@link StoragePolicySatisfierMode.EXTERNAL}, then
+   * If the configured mode is {@link StoragePolicySatisfierMode#EXTERNAL}, then
    * it won't do anything. Administrator requires to stop external sps service
    * explicitly, if needed.
    *
    * <p>
-   * If the configured mode is {@link StoragePolicySatisfierMode.NONE}, then the
+   * If the configured mode is {@link StoragePolicySatisfierMode#NONE}, then the
    * service is disabled and won't do any action.
    */
   public void stop() {
@@ -225,6 +225,7 @@ public class StoragePolicySatisfyManager {
 
   /**
    * Verify that satisfier queue limit exceeds allowed outstanding limit.
+   * @throws IOException
    */
   public void verifyOutstandingPathQLimit() throws IOException {
     long size = pathsToBeTraveresed.size();
@@ -269,6 +270,7 @@ public class StoragePolicySatisfyManager {
 
   /**
    * Adds the sps path to SPSPathIds list.
+   * @param id
    */
   public void addPathId(long id) {
     synchronized (pathsToBeTraveresed) {
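
Condensing the start()/stop() contract documented above into pseudo-Java (a
sketch, not the real method bodies; only the two modes named in the javadoc
are shown):

  enum SatisfierMode { EXTERNAL, NONE }

  final class SpsStartSketch {
    static void start(SatisfierMode mode) {
      switch (mode) {
      case EXTERNAL:
        // Nothing to launch: the administrator runs the external SPS service,
        // which pulls the queued sps path ids from the NameNode itself.
        break;
      case NONE:
        // Feature disabled; queued sps path hints are simply cleared.
        break;
      }
    }
  }
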
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgressView.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgressView.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgressView.java
index 3101741..a4453a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgressView.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgressView.java
@@ -179,7 +179,7 @@ public class StartupProgressView {
   /**
    * Returns all phases.
    *
-   * @return Iterable<Phase> containing all phases
+   * @return {@code Iterable<Phase>} containing all phases
    */
   public Iterable<Phase> getPhases() {
     return EnumSet.allOf(Phase.class);
@@ -189,7 +189,7 @@ public class StartupProgressView {
    * Returns all steps within a phase.
    *
    * @param phase Phase to get
-   * @return Iterable<Step> all steps
+   * @return {@code Iterable<Step>} all steps
    */
   public Iterable<Step> getSteps(Phase phase) {
     return new TreeSet<Step>(phases.get(phase).steps.keySet());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
index 4d61d0f..72ec9f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
@@ -47,22 +47,22 @@ import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowMan
 
 /**
 * The interface to the top metrics.
- * <p/>
+ * <p>
 * Metrics are collected by a custom audit logger, {@link org.apache.hadoop
 * .hdfs.server.namenode.top.TopAuditLogger}, which calls TopMetrics to
 * increment per-operation, per-user counts on every audit log call. These
 * counts are used to show the top users by NameNode operation as well as
 * across all operations.
- * <p/>
+ * <p>
 * TopMetrics maintains these counts for a configurable number of time
 * intervals, e.g. 1min, 5min, 25min. Each interval is tracked by a
 * RollingWindowManager.
- * <p/>
+ * <p>
 * These metrics are published as a JSON string via {@link org.apache.hadoop
 * .hdfs.server .namenode.metrics.FSNamesystemMBean#getTopWindows}. This is
 * done by calling {@link org.apache.hadoop.hdfs.server.namenode.top.window
 * .RollingWindowManager#snapshot} on each RollingWindowManager.
- * <p/>
+ * <p>
 * Thread-safe: relies on thread-safety of RollingWindowManager
 */
 @InterfaceAudience.Private
@@ -119,6 +119,13 @@ public class TopMetrics implements MetricsSource {
    * log file. This is to be consistent when {@link TopMetrics} is charged with
    * data read back from log files instead of being invoked directly by the
    * FsNamesystem
+   * @param succeeded
+   * @param userName
+   * @param addr
+   * @param cmd
+   * @param src
+   * @param dst
+   * @param status
    */
   public void report(boolean succeeded, String userName, InetAddress addr,
       String cmd, String src, String dst, FileStatus status) {
@@ -147,6 +154,8 @@ public class TopMetrics implements MetricsSource {
    * {@link org.apache.hadoop.metrics2.MetricsRecord}s for consumption by
    * external metrics systems. Each metrics record added corresponds to the
    * reporting period a.k.a window length of the configured rolling windows.
+   * @param collector
+   * @param all
    */
   @Override
   public void getMetrics(MetricsCollector collector, boolean all) {
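
The collection path the TopMetrics comment describes, reduced to its essence
(illustrative; the real class delegates to a RollingWindowManager per
configured interval rather than keeping a flat map):

  import java.util.concurrent.ConcurrentHashMap;
  import java.util.concurrent.atomic.AtomicLong;

  class TopCounts {
    // Per-(operation, user) event counts, as fed from the audit logger.
    private final ConcurrentHashMap<String, AtomicLong> counts =
        new ConcurrentHashMap<>();

    void recordOp(String cmd, String userName) {
      counts.computeIfAbsent(cmd + "." + userName, k -> new AtomicLong())
          .incrementAndGet();
    }
  }
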
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindow.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindow.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindow.java
index 63ff125..f927106 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindow.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindow.java
@@ -29,23 +29,24 @@ import org.slf4j.LoggerFactory;
 * Events are reported based on occurrence time. The total number of events in
 * the last period covered by the rolling window can be retrieved by the
 * {@link #getSum(long)} method.
- * <p/>
+ * <p>
 *
 * Assumptions:
- * <p/>
+ * <p>
 *
 * (1) Concurrent invocation of {@link #incAt} method are possible
- * <p/>
+ * <p>
 *
 * (2) The time parameter of two consecutive invocation of {@link #incAt} could
 * be in any given order
- * <p/>
+ * <p>
 *
 * (3) The buffering delays are not more than the window length, i.e., after two
 * consecutive invocation {@link #incAt(long time1, long)} and
- * {@link #incAt(long time2, long)}, time1 < time2 || time1 - time2 < windowLenMs.
+ * {@link #incAt(long time2, long)}, time1 &lt; time2 || time1 - time2 &lt;
+ * windowLenMs.
 * This assumption helps avoiding unnecessary synchronizations.
- * <p/>
+ * <p>
 *
 * Thread-safety is built in the {@link RollingWindow.Bucket}
 */
@@ -85,7 +86,7 @@ public class RollingWindow {
   /**
    * When an event occurs at the specified time, this method reflects that in
    * the rolling window.
-   * <p/>
+   * <p>
    *
    * @param time the time at which the event occurred
    * @param delta the delta that will be added to the window
@@ -153,6 +154,7 @@ public class RollingWindow {
     * performed. We do not need to update the {@link #updateTime} because as
     * long as the {@link #updateTime} belongs to the current view of the
     * rolling window, the algorithm works fine.
+     * @param delta
     */
    void inc(long delta) {
      value.addAndGet(delta);
@@ -161,7 +163,7 @@ public class RollingWindow {
 
    /**
     * Get value represented by this window at the specified time
-     * <p/>
+     * <p>
    *
    * If time lags behind the latest update time, the new updates are still
    * included in the sum
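
For intuition, here is a minimal bucketed rolling window honoring the
incAt/getSum contract sketched in the javadoc above. This is illustrative
only: the reset path below is a simplified check-then-act, whereas the real
Bucket class coordinates concurrent resets more carefully.

  import java.util.concurrent.atomic.AtomicLong;

  class SimpleRollingWindow {
    private final long bucketLenMs;
    private final int numBuckets;
    private final AtomicLong[] values;
    private final AtomicLong[] updateTimes;

    SimpleRollingWindow(long windowLenMs, int numBuckets) {
      this.numBuckets = numBuckets;
      this.bucketLenMs = windowLenMs / numBuckets;
      this.values = new AtomicLong[numBuckets];
      this.updateTimes = new AtomicLong[numBuckets];
      for (int i = 0; i < numBuckets; i++) {
        values[i] = new AtomicLong();
        updateTimes[i] = new AtomicLong(Long.MIN_VALUE);
      }
    }

    // Credit the bucket covering `time`, resetting it first if its contents
    // belong to an older rotation of the ring.
    void incAt(long time, long delta) {
      int i = (int) ((time / bucketLenMs) % numBuckets);
      long bucketStart = (time / bucketLenMs) * bucketLenMs;
      if (updateTimes[i].get() < bucketStart) {
        values[i].set(0);
        updateTimes[i].set(bucketStart);
      }
      values[i].addAndGet(delta);
    }

    // Sum of all buckets still inside [time - windowLen, time].
    long getSum(long time) {
      long windowStart = time - bucketLenMs * numBuckets;
      long sum = 0;
      for (int i = 0; i < numBuckets; i++) {
        if (updateTimes[i].get() >= windowStart) {
          sum += values[i].get();
        }
      }
      return sum;
    }
  }
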
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
index bdd0ab0..095294e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
@@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory;
 * A class to manage the set of {@link RollingWindow}s. This class is the
 * interface of metrics system to the {@link RollingWindow}s to retrieve the
 * current top metrics.
- * <p/>
+ * <p>
 * Thread-safety is provided by each {@link RollingWindow} being thread-safe as
 * well as {@link ConcurrentHashMap} for the collection of them.
 */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java
index e90317d..6b0b261 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java
@@ -30,11 +30,12 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 *
 * Upon receiving this command, this DataNode pass the array of block movement
 * details to
- * {@link org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker}
- * service. Later, StoragePolicySatisfyWorker will schedule block movement tasks
- * for these blocks and monitors the completion of each task. After the block
- * movement attempt is finished(with success or failure) this DataNode will send
- * response back to NameNode about the block movement attempt finished details.
+ * {@link org.apache.hadoop.hdfs.server.sps.ExternalSPSBlockMoveTaskHandler}
+ * service. Later, ExternalSPSBlockMoveTaskHandler will schedule block movement
+ * tasks for these blocks and monitors the completion of each task. After the
+ * block movement attempt is finished(with success or failure) this DataNode
+ * will send response back to NameNode about the block movement attempt
+ * finished details.
 */
 public class BlockStorageMovementCommand extends DatanodeCommand {
   private final String blockPoolId;
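
The RollingWindowManager comment above pairs naturally with a map keyed by
metric and user. Reusing the SimpleRollingWindow sketch shown after the
RollingWindow hunks (again illustrative; window length and bucket count below
are arbitrary):

  import java.util.concurrent.ConcurrentHashMap;

  class WindowsByUser {
    private final ConcurrentHashMap<String, SimpleRollingWindow> windows =
        new ConcurrentHashMap<>();

    SimpleRollingWindow getWindow(String op, String user) {
      // Thread-safe get-or-create, one window per (op, user) pair.
      return windows.computeIfAbsent(op + "." + user,
          k -> new SimpleRollingWindow(5 * 60 * 1000L, 10));
    }
  }
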
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
index 311b68f..5680ef3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
@@ -158,7 +158,7 @@ public interface DatanodeProtocol {
    * {@link #blockReport(DatanodeRegistration, String, StorageBlockReport[], BlockReportContext)},
    * which is used to communicated blocks stored on disk.
    *
-   * @param The datanode registration.
+   * @param registration The datanode registration.
    * @param poolId The block pool ID for the blocks.
    * @param blockIds A list of block IDs.
    * @return The DatanodeCommand.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
index f80477b..5d609de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
@@ -73,7 +73,7 @@ public interface NamenodeProtocol {
    * @param datanode a data node
    * @param size requested size
    * @param minBlockSize each block should be of this minimum Block Size
-   * @return a list of blocks & their locations
+   * @return BlocksWithLocations a list of blocks &amp; their locations
    * @throws IOException if size is less than or equal to 0 or
                                    datanode does not exist
@@ -183,7 +183,8 @@ public interface NamenodeProtocol {
   /**
    * Return a structure containing details about all edit logs
    * available to be fetched from the NameNode.
-   * @param sinceTxId return only logs that contain transactions >= sinceTxId
+   * @param sinceTxId return only logs that contain transactions {@literal >=}
+   *          sinceTxId
    */
   @Idempotent
   public RemoteEditLogManifest getEditLogManifest(long sinceTxId)
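
The sinceTxId contract in the last hunk reads as a simple filter over the
available edit logs (a hypothetical EditLog holder is used here purely for
illustration; the real return type is RemoteEditLogManifest):

  import java.util.ArrayList;
  import java.util.List;

  final class EditLog {              // hypothetical stand-in
    final long startTxId;
    final long endTxId;
    EditLog(long startTxId, long endTxId) {
      this.startTxId = startTxId;
      this.endTxId = endTxId;
    }
  }

  final class ManifestSketch {
    // Keep every log that still contains transactions >= sinceTxId.
    static List<EditLog> manifestSince(List<EditLog> logs, long sinceTxId) {
      List<EditLog> out = new ArrayList<>();
      for (EditLog log : logs) {
        if (log.endTxId >= sinceTxId) {
          out.add(log);
        }
      }
      return out;
    }
  }
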
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
index 3ea0294..64dec8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
@@ -36,8 +36,10 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
index 3171497..2afc97c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
@@ -50,14 +50,17 @@ import org.apache.hadoop.util.ToolRunner;
 * <p>The tool scans all files and directories, starting from an indicated
 *  root path. The following abnormal conditions are detected and handled:</p>
 * <ul>
- * <li>files with blocks that are completely missing from all datanodes.<br/>
+ * <li>files with blocks that are completely missing from all datanodes.<br>
 * In this case the tool can perform one of the following actions:
 *  <ul>
- *      <li>none ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_NONE})</li>
 *      <li>move corrupted files to /lost+found directory on DFS
- *      ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_MOVE}). Remaining data blocks are saved as a
- *      block chains, representing longest consecutive series of valid blocks.</li>
- *      <li>delete corrupted files ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_DELETE})</li>
+ *      ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#doMove}).
+ *      Remaining data blocks are saved as a
+ *      block chains, representing longest consecutive series of valid blocks.
+ *      </li>
+ *      <li>delete corrupted files
+ *      ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#doDelete})
+ *      </li>
 *  </ul>
 *  </li>
 * <li>detect files with under-replicated or over-replicated blocks</li>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java
index f075ed2..43eedf8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java
@@ -121,8 +121,8 @@ public class OfflineEditsViewer extends Configured implements Tool {
 
   /** Process an edit log using the chosen processor or visitor.
    *
-   * @param inputFilename The file to process
-   * @param outputFilename The output file name
+   * @param inputFileName The file to process
+   * @param outputFileName The output file name
    * @param processor If visitor is null, the processor to use
    * @param visitor If non-null, the visitor to use.
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsVisitor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsVisitor.java
index eb477e1..1383f4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsVisitor.java
@@ -52,7 +52,7 @@ abstract public interface OfflineEditsVisitor {
    * Begin visiting an element that encloses another element, such as
    * the beginning of the list of blocks that comprise a file.
    *
-   * @param value Token being visited
+   * @param op Token being visited
    */
   abstract void visitOp(FSEditLogOp op)
    throws IOException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java
index c84e2ed..cc97ea7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java
@@ -50,9 +50,7 @@ public class StatisticsEditsVisitor implements OfflineEditsVisitor {
   /**
    * Create a processor that writes to the file named and may or may not
    * also output to the screen, as specified.
    *
-   * @param filename Name of file to write output to
-   * @param tokenizer Input tokenizer
-   * @param printToScreen Mirror output to screen?
+   * @param out Name of file to write output to
    */
   public StatisticsEditsVisitor(OutputStream out) throws IOException {
     this.out = new PrintWriter(new OutputStreamWriter(out, Charsets.UTF_8));
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java
index 28bcf10..9c7b7f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java
@@ -26,11 +26,13 @@ import org.apache.hadoop.classification.InterfaceAudience;
 /**
 * File name distribution visitor.
 * <p>
- * It analyzes file names in fsimage and prints the following information:
+ * It analyzes file names in fsimage and prints the following information:
+ * <ul>
 * <li>Number of unique file names</li>
 * <li>Number file names and the corresponding number range of files that use
 * these same names</li>
 * <li>Heap saved if the file name objects are reused</li>
+ * </ul>
 */
 @InterfaceAudience.Private
 public class NameDistributionVisitor extends TextWriterImageVisitor {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java
index 1f87a7a..188537b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java
@@ -470,23 +470,23 @@ public class Diff<K, E extends Diff.Element<K>> {
    * <pre>
    * 1. For (c, 0) in the posterior diff, check the element in this diff:
    * 1.1 (c', 0) in this diff: impossible
-   * 1.2 (0, d') in this diff: put in c-list --> (c, d')
+   * 1.2 (0, d') in this diff: put in c-list --&gt; (c, d')
    * 1.3 (c', d') in this diff: impossible
-   * 1.4 (0, 0) in this diff: put in c-list --> (c, 0)
+   * 1.4 (0, 0) in this diff: put in c-list --&gt; (c, 0)
    * This is the same logic as create(E).
    *
    * 2. For (0, d) in the posterior diff,
-   * 2.1 (c', 0) in this diff: remove from c-list --> (0, 0)
+   * 2.1 (c', 0) in this diff: remove from c-list --&gt; (0, 0)
    * 2.2 (0, d') in this diff: impossible
-   * 2.3 (c', d') in this diff: remove from c-list --> (0, d')
-   * 2.4 (0, 0) in this diff: put in d-list --> (0, d)
+   * 2.3 (c', d') in this diff: remove from c-list --&gt; (0, d')
+   * 2.4 (0, 0) in this diff: put in d-list --&gt; (0, d)
    * This is the same logic as delete(E).
    *
    * 3. For (c, d) in the posterior diff,
-   * 3.1 (c', 0) in this diff: replace the element in c-list --> (c, 0)
+   * 3.1 (c', 0) in this diff: replace the element in c-list --&gt; (c, 0)
    * 3.2 (0, d') in this diff: impossible
-   * 3.3 (c', d') in this diff: replace the element in c-list --> (c, d')
-   * 3.4 (0, 0) in this diff: put in c-list and d-list --> (c, d)
+   * 3.3 (c', d') in this diff: replace the element in c-list --&gt; (c, d')
+   * 3.4 (0, 0) in this diff: put in c-list and d-list --&gt; (c, d)
    * This is the same logic as modify(E, E).
    * </pre>
    *
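
The three-case table in the Diff.java hunk collapses to a small pair-merge.
A hedged sketch derived from that table (nulls stand for the table's "0"
entries; this is not the actual combinePosterior implementation):

  final class CD {
    // (c, d) pair per element: c = created element, d = deleted element.
    final Object c;
    final Object d;
    CD(Object c, Object d) { this.c = c; this.d = d; }
  }

  final class DiffCombineSketch {
    static CD combine(CD prior, CD post) {
      Object c = post.c != null ? post.c : prior.c;
      Object d;
      if (post.d == null) {           // case 1: create keeps an earlier delete
        d = prior.d;
      } else if (prior.c != null) {   // cases 2.1/2.3 and 3.1/3.3: the delete
        d = prior.d;                  // cancels the element created here
        if (post.c == null) {
          c = null;
        }
      } else {                        // cases 2.4 and 3.4
        d = post.d;
      }
      return new CD(c, d);
    }
  }

All eight reachable rows of the table check out against this merge, e.g.
combine((c', 0), (0, d)) yields (0, 0) per rule 2.1.
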
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java
index f23b021..8c783d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java
@@ -116,8 +116,8 @@ public class XMLUtils {
    *
    * There are three kinds of code points in XML:
    * - Those that can be represented normally,
-   * - Those that have to be escaped (for example, & must be represented
-   *     as &amp;)
+   * - Those that have to be escaped (for example, &amp; must be represented
+   *     as {@literal &amp;})
    * - Those that cannot be represented at all in XML.
    *
    * The built-in SAX functions will handle the first two types for us just
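
Finally, the three kinds of code points the XMLUtils comment distinguishes
follow the XML 1.0 Char production. A sketch of the classification (not
XMLUtils' actual implementation):

  final class XmlCodePoints {
    static String classify(int cp) {
      // XML 1.0 Char: #x9 | #xA | #xD | [#x20-#xD7FF]
      //               | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
      boolean validInXml = cp == 0x9 || cp == 0xA || cp == 0xD
          || (cp >= 0x20 && cp <= 0xD7FF)
          || (cp >= 0xE000 && cp <= 0xFFFD)
          || (cp >= 0x10000 && cp <= 0x10FFFF);
      if (!validInXml) {
        return "cannot be represented in XML at all";
      }
      if (cp == '&' || cp == '<' || cp == '>') {
        return "must be escaped, e.g. & becomes &amp;";
      }
      return "can be represented normally";
    }
  }
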