http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/ScopedAclEntries.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/ScopedAclEntries.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/ScopedAclEntries.java
index a16f439..a67cafe 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/ScopedAclEntries.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/ScopedAclEntries.java
@@ -42,7 +42,7 @@ public final class ScopedAclEntries {
  * list is already sorted such that all access entries precede all default
  * entries.
  *
- * @param aclEntries List<AclEntry> to separate
+ * @param aclEntries List&lt;AclEntry&gt; to separate
  */
 public ScopedAclEntries(List<AclEntry> aclEntries) {
   int pivot = calculatePivotOnDefaultEntries(aclEntries);
@@ -59,8 +59,8 @@ public final class ScopedAclEntries {
 /**
  * Returns access entries.
  *
- * @return List<AclEntry> containing just access entries, or an empty list if
- * there are no access entries
+ * @return List&lt;AclEntry&gt; containing just access entries, or an empty
+ * list if there are no access entries
  */
 public List<AclEntry> getAccessEntries() {
   return accessEntries;
@@ -69,8 +69,8 @@ public final class ScopedAclEntries {
 /**
  * Returns default entries.
  *
- * @return List<AclEntry> containing just default entries, or an empty list if
- * there are no default entries
+ * @return List&lt;AclEntry&gt; containing just default entries, or an empty
+ * list if there are no default entries
  */
 public List<AclEntry> getDefaultEntries() {
   return defaultEntries;
@@ -78,8 +78,8 @@
 /**
  * Returns the pivot point in the list between the access entries and the
- * default entries. This is the index of the first element in the list that is
- * a default entry.
+ * default entries. This is the index of the first element in the list that
+ * is a default entry.
  *
  * @param aclBuilder ArrayList<AclEntry> containing entries to build
  * @return int pivot point, or -1 if list contains no default entries
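For context, the class documented above splits a single scope-sorted ACL list
into its access and default sublists around a pivot index. A minimal sketch of
that pivot computation, assuming a simple linear scan (the real
calculatePivotOnDefaultEntries may differ, e.g. by using binary search), with a
simplified stand-in for AclEntry:

import java.util.Arrays;
import java.util.List;

public class ScopedAclEntriesSketch {
  enum Scope { ACCESS, DEFAULT }

  static class Entry {
    final Scope scope;
    Entry(Scope scope) { this.scope = scope; }
  }

  // Index of the first DEFAULT entry, or -1 if the list has none.
  // Assumes all ACCESS entries precede all DEFAULT entries.
  static int calculatePivot(List<Entry> entries) {
    for (int i = 0; i < entries.size(); i++) {
      if (entries.get(i).scope == Scope.DEFAULT) {
        return i;
      }
    }
    return -1;
  }

  public static void main(String[] args) {
    List<Entry> acl = Arrays.asList(
        new Entry(Scope.ACCESS), new Entry(Scope.DEFAULT));
    int pivot = calculatePivot(acl);                    // 1
    System.out.println(acl.subList(0, pivot));          // access entries
    System.out.println(acl.subList(pivot, acl.size())); // default entries
  }
}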
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
index a4746cf..3eef278 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
@@ -148,16 +148,16 @@ abstract public class Command extends Configured {
  * expand arguments, and then process each argument.
  * <pre>
  * run
- * |-> {@link #processOptions(LinkedList)}
- * \-> {@link #processRawArguments(LinkedList)}
- *     |-> {@link #expandArguments(LinkedList)}
- *     |   \-> {@link #expandArgument(String)}*
- *     \-> {@link #processArguments(LinkedList)}
- *         |-> {@link #processArgument(PathData)}*
- *         |   |-> {@link #processPathArgument(PathData)}
- *         |   \-> {@link #processPaths(PathData, PathData...)}
- *         |       \-> {@link #processPath(PathData)}*
- *         \-> {@link #processNonexistentPath(PathData)}
+ * |{@literal ->} {@link #processOptions(LinkedList)}
+ * \{@literal ->} {@link #processRawArguments(LinkedList)}
+ *     |{@literal ->} {@link #expandArguments(LinkedList)}
+ *     |   \{@literal ->} {@link #expandArgument(String)}*
+ *     \{@literal ->} {@link #processArguments(LinkedList)}
+ *         |{@literal ->} {@link #processArgument(PathData)}*
+ *         |   |{@literal ->} {@link #processPathArgument(PathData)}
+ *         |   \{@literal ->} {@link #processPaths(PathData, PathData...)}
+ *         |       \{@literal ->} {@link #processPath(PathData)}*
+ *         \{@literal ->} {@link #processNonexistentPath(PathData)}
  * </pre>
  * Most commands will chose to implement just
  * {@link #processOptions(LinkedList)} and {@link #processPath(PathData)}
@@ -292,8 +292,8 @@ abstract public class Command extends Configured {
 /**
  * This is the last chance to modify an argument before going into the
  * (possibly) recursive {@link #processPaths(PathData, PathData...)}
- * -> {@link #processPath(PathData)} loop. Ex. ls and du use this to
- * expand out directories.
+ * {@literal ->} {@link #processPath(PathData)} loop. Ex. ls and du use
+ * this to expand out directories.
  * @param item a {@link PathData} representing a path which exists
  * @throws IOException if anything goes wrong...
  */
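As a point of reference for the call tree charted above, most commands only
override the first and last steps and inherit expansion and recursion. A
hypothetical sketch, not part of this patch (the class name and option are
invented; FsCommand, CommandFormat, PathData and the protected out stream are
the existing shell plumbing):

import java.io.IOException;
import java.util.LinkedList;

import org.apache.hadoop.fs.shell.CommandFormat;
import org.apache.hadoop.fs.shell.FsCommand;
import org.apache.hadoop.fs.shell.PathData;

public class PrintPaths extends FsCommand {
  @Override
  protected void processOptions(LinkedList<String> args) {
    // Accept at least one path argument and an optional -R flag.
    CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, "R");
    cf.parse(args);
    setRecursive(cf.getOpt("R")); // recurse through processPaths if -R given
  }

  @Override
  protected void processPath(PathData item) throws IOException {
    out.println(item); // invoked once per expanded, existing path
  }
}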
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
index bf30b22..4dd20d1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
@@ -162,7 +162,7 @@ public class CommandFormat {
 /** Returns all the options that are set
  *
- * @return Set<String> of the enabled options
+ * @return Set{@literal <}String{@literal >} of the enabled options
  */
 public Set<String> getOpts() {
   Set<String> optSet = new HashSet<String>();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
index 463b14c..8366eac 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
@@ -75,16 +75,16 @@ import org.apache.hadoop.util.Time;
  * one or more individual file systems (a localFs or Hdfs, S3fs, etc).
  * For example one could have a mount table that provides links such as
  * <ul>
- * <li> /user -> hdfs://nnContainingUserDir/user
- * <li> /project/foo -> hdfs://nnProject1/projects/foo
- * <li> /project/bar -> hdfs://nnProject2/projects/bar
- * <li> /tmp -> hdfs://nnTmp/privateTmpForUserXXX
+ * <li> /user {@literal ->} hdfs://nnContainingUserDir/user
+ * <li> /project/foo {@literal ->} hdfs://nnProject1/projects/foo
+ * <li> /project/bar {@literal ->} hdfs://nnProject2/projects/bar
+ * <li> /tmp {@literal ->} hdfs://nnTmp/privateTmpForUserXXX
  * </ul>
  *
  * ViewFs is specified with the following URI: <b>viewfs:///</b>
  * <p>
  * To use viewfs one would typically set the default file system in the
- * config (i.e. fs.defaultFS < = viewfs:///) along with the
+ * config (i.e. fs.defaultFS {@literal <} = viewfs:///) along with the
  * mount table config variables as described below.
  *
  * <p>
@@ -132,7 +132,7 @@ import org.apache.hadoop.util.Time;
  * (because they do not fit on one) then one could specify a mount
  * entry such as following merges two dirs:
  * <ul>
- * <li> /user -> hdfs://nnUser1/user,hdfs://nnUser2/user
+ * <li> /user {@literal ->} hdfs://nnUser1/user,hdfs://nnUser2/user
  * </ul>
  * Such a mergeLink can be specified with the following config var where ","
  * is used as the separator for each of links to be merged:
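As background to the mount-table javadoc above, the links can be expressed
programmatically. A sketch under stated assumptions, not part of this patch:
key names follow the fs.viewfs.mounttable.<name>.link.<path> convention, the
mount table name "default" and the namenode URIs are placeholders taken from
the javadoc example:

import org.apache.hadoop.conf.Configuration;

public class ViewFsMountTableExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "viewfs:///");
    conf.set("fs.viewfs.mounttable.default.link./user",
        "hdfs://nnContainingUserDir/user");
    conf.set("fs.viewfs.mounttable.default.link./project/foo",
        "hdfs://nnProject1/projects/foo");
    conf.set("fs.viewfs.mounttable.default.link./tmp",
        "hdfs://nnTmp/privateTmpForUserXXX");
  }
}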
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
index d099ca7..12de2ef 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
@@ -54,10 +54,10 @@ import org.slf4j.LoggerFactory;
  * Apache Zookeeper. Using Zookeeper as a coordination service, leader election
  * can be performed by atomically creating an ephemeral lock file (znode) on
  * Zookeeper. The service instance that successfully creates the znode becomes
- * active and the rest become standbys. <br/>
+ * active and the rest become standbys. <br>
  * This election mechanism is only efficient for small number of election
  * candidates (order of 10's) because contention on single znode by a large
- * number of candidates can result in Zookeeper overload. <br/>
+ * number of candidates can result in Zookeeper overload. <br>
  * The elector does not guarantee fencing (protection of shared resources) among
  * service instances. After it has notified an instance about becoming a leader,
  * then that instance must ensure that it meets the service consistency
@@ -70,10 +70,10 @@ import org.slf4j.LoggerFactory;
 public class ActiveStandbyElector implements StatCallback, StringCallback {
 /**
- * Callback interface to interact with the ActiveStandbyElector object. <br/>
+ * Callback interface to interact with the ActiveStandbyElector object. <br>
  * The application will be notified with a callback only on state changes
  * (i.e. there will never be successive calls to becomeActive without an
- * intermediate call to enterNeutralMode). <br/>
+ * intermediate call to enterNeutralMode). <br>
  * The callbacks will be running on Zookeeper client library threads. The
  * application should return from these callbacks quickly so as not to impede
  * Zookeeper client library performance and notifications. The app will
@@ -105,7 +105,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
  * interface. The service may choose to ignore this or stop doing state
  * changing operations. Upon reconnection, the elector verifies the leader
  * status and calls back on the becomeActive and becomeStandby app
- * interfaces. <br/>
+ * interfaces. <br>
  * Zookeeper disconnects can happen due to network issues or loss of
  * Zookeeper quorum. Thus enterNeutralMode can be used to guard against
  * split-brain issues. In such situations it might be prudent to call
@@ -178,12 +178,12 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
   private ZooKeeper monitorLockNodeClient;
   /**
-   * Create a new ActiveStandbyElector object <br/>
+   * Create a new ActiveStandbyElector object <br>
    * The elector is created by providing to it the Zookeeper configuration, the
    * parent znode under which to create the znode and a reference to the
-   * callback interface. <br/>
+   * callback interface. <br>
    * The parent znode name must be the same for all service instances and
-   * different across services. <br/>
+   * different across services. <br>
    * After the leader has been lost, a new leader will be elected after the
    * session timeout expires. Hence, the app must set this parameter based on
    * its needs for failure response time. The session timeout must be greater
@@ -217,12 +217,12 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
   }
   /**
-   * Create a new ActiveStandbyElector object <br/>
+   * Create a new ActiveStandbyElector object <br>
    * The elector is created by providing to it the Zookeeper configuration, the
    * parent znode under which to create the znode and a reference to the
-   * callback interface. <br/>
+   * callback interface. <br>
    * The parent znode name must be the same for all service instances and
-   * different across services. <br/>
+   * different across services. <br>
    * After the leader has been lost, a new leader will be elected after the
    * session timeout expires. Hence, the app must set this parameter based on
    * its needs for failure response time. The session timeout must be greater
@@ -278,9 +278,9 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
  /**
   * To participate in election, the app will call joinElection. The result will
   * be notified by a callback on either the becomeActive or becomeStandby app
-  * interfaces. <br/>
+  * interfaces. <br>
   * After this the elector will automatically monitor the leader status and
-  * perform re-election if necessary<br/>
+  * perform re-election if necessary<br>
   * The app could potentially start off in standby mode and ignore the
   * becomeStandby call.
   *
@@ -397,11 +397,11 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
  /**
   * Any service instance can drop out of the election by calling quitElection.
-  * <br/>
+  * <br>
   * This will lose any leader status, if held, and stop monitoring of the lock
-  * node. <br/>
+  * node. <br>
   * If the instance wants to participate in election again, then it needs to
-  * call joinElection(). <br/>
+  * call joinElection(). <br>
   * This allows service instances to take themselves out of rotation for known
   * impending unavailable states (e.g. long GC pause or software upgrade).
   *
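The callback contract described in this javadoc (becomeActive, becomeStandby,
enterNeutralMode, never two becomeActive calls without an intervening
enterNeutralMode) can be illustrated with a self-contained sketch. This is not
the elector's real callback interface, just a simplified stand-in using the
method names from the javadoc:

public class MyHAService {
  private volatile boolean active;

  // Called when this instance wins the election (created the lock znode).
  public void becomeActive() {
    active = true;            // start serving state-changing operations
  }

  // Called when another instance holds the lock.
  public void becomeStandby() {
    active = false;
  }

  // Called on a ZooKeeper disconnect: leadership is unknown, so stop
  // state-changing work until the elector re-verifies the lock. Returning
  // quickly matters because this runs on ZooKeeper client threads.
  public void enterNeutralMode() {
    active = false;           // guards against split-brain
  }

  public boolean isActive() {
    return active;
  }
}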
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
index e955979..2107bf1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
@@ -372,7 +372,7 @@ public abstract class HAAdmin extends Configured implements Tool {
 /**
  * Return the serviceId as is, we are assuming it was
- * given as a service address of form <host:ipcport>.
+ * given as a service address of form {@literal <}host:ipcport{@literal >}.
  */
 protected String getServiceAddr(String serviceId) {
   return serviceId;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
index 2247a34..64e7315 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
@@ -44,7 +44,7 @@ import org.slf4j.LoggerFactory;
  * <code>com.example.foo.MyMethod</code>
  * The class provided must implement the {@link FenceMethod} interface.
  * The fencing methods that ship with Hadoop may also be referred to
- * by shortened names:<p>
+ * by shortened names:<br>
  * <ul>
  * <li><code>shell(/path/to/some/script.sh args...)</code></li>
  * <li><code>sshfence(...)</code> (see {@link SshFenceByTcpPort})
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
index 9ae113b..e0c2f4d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
@@ -52,7 +52,7 @@ import org.slf4j.LoggerFactory;
  * with ssh.
  * <p>
  * In order to achieve passwordless SSH, the operator must also configure
- * <code>dfs.ha.fencing.ssh.private-key-files<code> to point to an
+ * <code>dfs.ha.fencing.ssh.private-key-files</code> to point to an
  * SSH key that has passphrase-less access to the given username and host.
  */
 public class SshFenceByTcpPort extends Configured
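Tying the two fencing javadocs above together, a sketch of how the shortened
method names and the SSH key setting might be wired up. This is illustrative
only; the key names come from the javadoc in this patch, while the script path
and key path are placeholders:

import org.apache.hadoop.conf.Configuration;

public class FencingConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Try SSH-based fencing first, then fall back to a shell script;
    // each line is one fencing method attempted in order.
    conf.set("dfs.ha.fencing.methods",
        "sshfence\nshell(/path/to/some/script.sh target_host)");
    conf.set("dfs.ha.fencing.ssh.private-key-files",
        "/home/hauser/.ssh/id_rsa");
  }
}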
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index d2ba469..a9c2319 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -107,9 +107,9 @@ import org.slf4j.LoggerFactory;
 /**
  * Create a Jetty embedded server to answer http requests. The primary goal is
  * to serve up status information for the server. There are three contexts:
- * "/logs/" -> points to the log directory "/static/" -> points to common static
- * files (src/webapps/static) "/" -> the jsp server code from
- * (src/webapps/<name>)
+ * "/logs/" {@literal ->} points to the log directory "/static/" {@literal ->}
+ * points to common static files (src/webapps/static) "/" {@literal ->} the
+ * jsp server code from (src/webapps/{@literal <}name{@literal >})
  *
  * This class is a fork of the old HttpServer. HttpServer exists for
  * compatibility reasons. See HBASE-10336 for more details.
@@ -1364,10 +1364,10 @@ public final class HttpServer2 implements FilterContainer {
 /**
  * Checks the user has privileges to access to instrumentation servlets.
- * <p/>
+ * <p>
  * If <code>hadoop.security.instrumentation.requires.admin</code> is set to FALSE
  * (default value) it always returns TRUE.
- * <p/>
+ * <p>
  * If <code>hadoop.security.instrumentation.requires.admin</code> is set to TRUE
  * it will check that if the current user is in the admin ACLS. If the user is
  * in the admin ACLs it returns TRUE, otherwise it returns FALSE.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java
index dc430cc..be86159 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java
@@ -83,7 +83,7 @@ public class EnumSetWritable<E extends Enum<E>> extends AbstractCollection<E>
 /**
  * reset the EnumSetWritable with specified
- * <tt>value</value> and <tt>elementType</tt>. If the <tt>value</tt> argument
+ * <tt>value</tt> and <tt>elementType</tt>. If the <tt>value</tt> argument
  * is null or its size is zero, the <tt>elementType</tt> argument must not be
  * null. If the argument <tt>value</tt>'s size is bigger than zero, the
  * argument <tt>elementType</tt> is not be used.
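The contract documented in that EnumSetWritable javadoc can be shown with a
small sketch. This is an assumption-laden illustration, not part of the patch:
it takes the javadoc at its word that a non-empty value ignores elementType
while a null or empty value requires it.

import java.util.EnumSet;
import org.apache.hadoop.io.EnumSetWritable;

public class EnumSetWritableExample {
  enum Flag { CREATE, OVERWRITE, APPEND }

  public static void main(String[] args) {
    EnumSetWritable<Flag> w =
        new EnumSetWritable<Flag>(EnumSet.of(Flag.CREATE));
    // Non-empty value: the elementType argument is not used, so null is fine.
    w.set(EnumSet.of(Flag.CREATE, Flag.APPEND), null);
    // Empty value: elementType must be supplied instead.
    w.set(EnumSet.noneOf(Flag.class), Flag.class);
    System.out.println(w.get()); // []
  }
}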
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index 3708a3b..5bbfba3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -370,7 +370,7 @@ public class IOUtils {
   }
   /**
-   * Return the complete list of files in a directory as strings.<p/>
+   * Return the complete list of files in a directory as strings.<p>
    *
    * This is better than File#listDir because it does not ignore IOExceptions.
    *
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
index 7cd7f98..804d365 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
@@ -80,7 +80,7 @@ public class ReadaheadPool {
  * @param readaheadLength the configured length to read ahead
  * @param maxOffsetToRead the maximum offset that will be readahead
  *        (useful if, for example, only some segment of the file is
- *        requested by the user). Pass {@link Long.MAX_VALUE} to allow
+ *        requested by the user). Pass {@link Long#MAX_VALUE} to allow
  *        readahead to the end of the file.
  * @param lastReadahead the result returned by the previous invocation
  *        of this function on this file descriptor, or null if this is
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java
index 252ee4c..9d3c3c1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java
@@ -37,7 +37,7 @@ import com.google.common.annotations.VisibleForTesting;
 /**
  * This class provides secure APIs for opening and creating files on the local
  * disk. The main issue this class tries to handle is that of symlink traversal.
- * <br/>
+ * <br>
  * An example of such an attack is:
  * <ol>
  * <li> Malicious user removes his task's syslog file, and puts a link to the
@@ -50,7 +50,7 @@ import com.google.common.annotations.VisibleForTesting;
  * </ol>
  * A similar attack is possible involving task log truncation, but in that case
  * due to an insecure write to a file.
- * <br/>
+ * <br>
  */
 public class SecureIOUtils {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
index f42848b..9afa621 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
@@ -79,7 +79,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSU
  * values.
  * </li>
  * <li>
- * <code>BlockCompressWriter</code> : Block-compressed files, both keys &
+ * <code>BlockCompressWriter</code> : Block-compressed files, both keys &amp;
  * values are collected in 'blocks'
  * separately and compressed. The size of
  * the 'block' is configurable.
@@ -94,13 +94,13 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSU
  * <p>The {@link SequenceFile.Reader} acts as the bridge and can read any of the
  * above <code>SequenceFile</code> formats.</p>
  *
- * <h4 id="Formats">SequenceFile Formats</h4>
+ * <h3 id="Formats">SequenceFile Formats</h3>
  *
  * <p>Essentially there are 3 different formats for <code>SequenceFile</code>s
  * depending on the <code>CompressionType</code> specified. All of them share a
  * <a href="#Header">common header</a> described below.
  *
- * <h5 id="Header">SequenceFile Header</h5>
+ * <h4 id="Header">SequenceFile Header</h4>
  * <ul>
  * <li>
  * version - 3 bytes of magic header <b>SEQ</b>, followed by 1 byte of actual
@@ -133,7 +133,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSU
  * </li>
  * </ul>
  *
- * <h5 id="#UncompressedFormat">Uncompressed SequenceFile Format</h5>
+ * <h5>Uncompressed SequenceFile Format</h5>
  * <ul>
  * <li>
  * <a href="#Header">Header</a>
@@ -152,7 +152,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSU
  * </li>
  * </ul>
  *
- * <h5 id="#RecordCompressedFormat">Record-Compressed SequenceFile Format</h5>
+ * <h5>Record-Compressed SequenceFile Format</h5>
  * <ul>
  * <li>
  * <a href="#Header">Header</a>
@@ -171,7 +171,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSU
  * </li>
  * </ul>
  *
- * <h5 id="#BlockCompressedFormat">Block-Compressed SequenceFile Format</h5>
+ * <h5>Block-Compressed SequenceFile Format</h5>
  * <ul>
  * <li>
  * <a href="#Header">Header</a>
@@ -1935,8 +1935,8 @@ public class SequenceFile {
    * @param fs The file system used to open the file.
    * @param file The file being read.
    * @param bufferSize The buffer size used to read the file.
-   * @param length The length being read if it is >= 0. Otherwise,
-   *        the length is not available.
+   * @param length The length being read if it is {@literal >=} 0.
+   *        Otherwise, the length is not available.
    * @return The opened stream.
    * @throws IOException
    */
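For readers new to the three writer formats named in this javadoc, here is a
short usage sketch for the block-compressed case. It assumes the Writer
options API (Writer.file/keyClass/valueClass/compression); the output path is
a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.Text;

public class SequenceFileWriteExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    SequenceFile.Writer writer = SequenceFile.createWriter(conf,
        SequenceFile.Writer.file(new Path("/tmp/example.seq")),
        SequenceFile.Writer.keyClass(IntWritable.class),
        SequenceFile.Writer.valueClass(Text.class),
        SequenceFile.Writer.compression(CompressionType.BLOCK));
    try {
      // Keys and values are buffered into blocks and compressed together.
      writer.append(new IntWritable(1), new Text("one"));
      writer.append(new IntWritable(2), new Text("two"));
    } finally {
      writer.close();
    }
  }
}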
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Writable.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Writable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Writable.java
index f0fe6fb..b94de6c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Writable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Writable.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.classification.InterfaceStability;
  * and returns the instance.</p>
  *
  * <p>Example:</p>
- * <p><blockquote><pre>
+ * <blockquote><pre>
  * public class MyWritable implements Writable {
  *   // Some data
  *   private int counter;
@@ -62,7 +62,7 @@ import org.apache.hadoop.classification.InterfaceStability;
  *       return w;
  *     }
  *   }
- * </pre></blockquote></p>
+ * </pre></blockquote>
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparable.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparable.java
index b030481..c1208aa 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparable.java
@@ -36,8 +36,9 @@ import org.apache.hadoop.classification.InterfaceStability;
  * satisfy this property.</p>
  *
  * <p>Example:</p>
- * <p><blockquote><pre>
- * public class MyWritableComparable implements WritableComparable<MyWritableComparable> {
+ * <blockquote><pre>
+ * public class MyWritableComparable implements
+ *     WritableComparable{@literal <MyWritableComparable>} {
  *   // Some data
  *   private int counter;
  *   private long timestamp;
@@ -66,7 +67,7 @@ import org.apache.hadoop.classification.InterfaceStability;
  *       return result
  *     }
  *   }
- * </pre></blockquote></p>
+ * </pre></blockquote>
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableUtils.java
index e58e0e1..2062fb6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableUtils.java
@@ -236,7 +236,8 @@ public final class WritableUtils {
  /**
   * Serializes an integer to a binary stream with zero-compressed encoding.
-  * For -112 <= i <= 127, only one byte is used with the actual value.
+  * For -112 {@literal <=} i {@literal <=} 127, only one byte is used with the
+  * actual value.
   * For other values of i, the first byte value indicates whether the
   * integer is positive or negative, and the number of bytes that follow.
  * If the first byte value v is between -113 and -116, the following integer
@@ -255,7 +256,8 @@ public final class WritableUtils {
  /**
   * Serializes a long to a binary stream with zero-compressed encoding.
-  * For -112 <= i <= 127, only one byte is used with the actual value.
+  * For -112 {@literal <=} i {@literal <=} 127, only one byte is used with the
+  * actual value.
   * For other values of i, the first byte value indicates whether the
   * long is positive or negative, and the number of bytes that follow.
   * If the first byte value v is between -113 and -120, the following long
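A quick round trip through the zero-compressed encoding documented above,
using WritableUtils with Hadoop's in-memory data buffers; small magnitudes
cost one byte, larger ones a length byte plus value bytes:

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.WritableUtils;

public class VIntRoundTrip {
  public static void main(String[] args) throws Exception {
    DataOutputBuffer out = new DataOutputBuffer();
    WritableUtils.writeVInt(out, 127);     // in [-112, 127]: 1 byte
    WritableUtils.writeVInt(out, 1000000); // 1 length byte + 3 value bytes
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    System.out.println(WritableUtils.readVInt(in)); // 127
    System.out.println(WritableUtils.readVInt(in)); // 1000000
  }
}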
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
index 3701f20..e248120 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
@@ -227,9 +227,9 @@ public class CompressionCodecFactory {
 /**
  * Find the relevant compression codec for the codec's canonical class name
  * or by codec alias.
- * <p/>
+ * <p>
  * Codec aliases are case insensitive.
- * <p/>
+ * <p>
  * The code alias is the short class name (without the package name).
  * If the short class name ends with 'Codec', then there are two aliases for
  * the codec, the complete short class name and the short class name without
@@ -255,9 +255,9 @@ public class CompressionCodecFactory {
 /**
  * Find the relevant compression codec for the codec's canonical class name
  * or by codec alias and returns its implemetation class.
- * <p/>
+ * <p>
  * Codec aliases are case insensitive.
- * <p/>
+ * <p>
  * The code alias is the short class name (without the package name).
  * If the short class name ends with 'Codec', then there are two aliases for
  * the codec, the complete short class name and the short class name without
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java
index 61462c0..45b5e9c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java
@@ -61,9 +61,9 @@ public class Lz4Codec implements Configurable, CompressionCodec {
   }
   /**
-   * Are the native lz4 libraries loaded & initialized?
+   * Are the native lz4 libraries loaded &amp; initialized?
    *
-   * @return true if loaded & initialized, otherwise false
+   * @return true if loaded &amp; initialized, otherwise false
    */
   public static boolean isNativeCodeLoaded() {
     return NativeCodeLoader.isNativeCodeLoaded();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
index 20a4cd6..cd0c788 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
@@ -57,7 +57,7 @@ public class SnappyCodec implements Configurable, CompressionCodec, DirectDecomp
   }
   /**
-   * Are the native snappy libraries loaded & initialized?
+   * Are the native snappy libraries loaded &amp; initialized?
    */
   public static void checkNativeCodeLoaded() {
     if (!NativeCodeLoader.buildSupportsSnappy()) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.java
index d4a9787..5713c56 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.java
@@ -247,7 +247,7 @@ public class Bzip2Compressor implements Compressor {
   }
   /**
-   * Returns the total number of uncompressed bytes input so far.</p>
+   * Returns the total number of uncompressed bytes input so far.
    *
    * @return the total (non-negative) number of uncompressed bytes input so far
    */
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.java
index 96693ad..72ba976 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.java
@@ -183,7 +183,7 @@ public class Bzip2Decompressor implements Decompressor {
   }
   /**
-   * Returns the total number of compressed bytes input so far.</p>
+   * Returns the total number of compressed bytes input so far.
    *
    * @return the total (non-negative) number of compressed bytes input so far
    */
@@ -195,7 +195,7 @@ public class Bzip2Decompressor implements Decompressor {
   /**
    * Returns the number of bytes remaining in the input buffers; normally
    * called when finished() is true to determine amount of post-gzip-stream
-   * data.</p>
+   * data.
    *
    * @return the total (non-negative) number of unprocessed bytes in input
    */
@@ -206,7 +206,7 @@ public class Bzip2Decompressor implements Decompressor {
   }
   /**
-   * Resets everything including the input buffers (user and direct).</p>
+   * Resets everything including the input buffers (user and direct).
    */
   @Override
   public synchronized void reset() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Factory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Factory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Factory.java
index d24b4bf..3af5309 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Factory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Factory.java
@@ -37,11 +37,11 @@ public class Bzip2Factory {
   private static boolean nativeBzip2Loaded;
   /**
-   * Check if native-bzip2 code is loaded & initialized correctly and
+   * Check if native-bzip2 code is loaded &amp; initialized correctly and
    * can be loaded for this job.
    *
    * @param conf configuration
-   * @return <code>true</code> if native-bzip2 is loaded & initialized
+   * @return <code>true</code> if native-bzip2 is loaded &amp; initialized
    *         and can be loaded for this job, else <code>false</code>
    */
   public static synchronized boolean isNativeBzip2Loaded(Configuration conf) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
index bb02cf2..8426d25 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
@@ -200,20 +200,18 @@ public class CBZip2InputStream extends InputStream implements BZip2Constants {
   }
   /**
-   * This method tries to find the marker (passed to it as the first parameter)
-   * in the stream. It can find bit patterns of length <= 63 bits. Specifically
-   * this method is used in CBZip2InputStream to find the end of block (EOB)
-   * delimiter in the stream, starting from the current position of the stream.
-   * If marker is found, the stream position will be at the byte containing
-   * the starting bit of the marker.
-   *
-   * @param marker The bit pattern to be found in the stream
-   * @param markerBitLength No of bits in the marker
-   * @return true if the marker was found otherwise false
-   *
-   * @throws IOException
-   * @throws IllegalArgumentException if marketBitLength is greater than 63
-   */
+   * This method tries to find the marker (passed to it as the first parameter)
+   * in the stream. It can find bit patterns of length &lt;= 63 bits.
+   * Specifically this method is used in CBZip2InputStream to find the end of
+   * block (EOB) delimiter in the stream, starting from the current position
+   * of the stream. If marker is found, the stream position will be at the
+   * byte containing the starting bit of the marker.
+   * @param marker The bit pattern to be found in the stream
+   * @param markerBitLength No of bits in the marker
+   * @return true if the marker was found otherwise false
+   * @throws IOException
+   * @throws IllegalArgumentException if marketBitLength is greater than 63
+   */
   public boolean skipToNextMarker(long marker, int markerBitLength)
       throws IOException, IllegalArgumentException {
     try {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java
index ca4e5cd..850fec7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java
@@ -64,7 +64,8 @@ import org.apache.hadoop.io.IOUtils;
  * </pre>
  *
  * <table width="100%" border="1">
- * <colgroup> <col width="33%" /> <col width="33%" /> <col width="33%" />
+ * <caption></caption>
+ * <colgroup> <col width="33%" > <col width="33%" > <col width="33%" >
  * </colgroup>
  * <tr>
  * <th colspan="3">Memory usage by blocksize</th>
@@ -614,9 +615,9 @@ public class CBZip2OutputStream extends OutputStream implements BZip2Constants {
  * @throws IOException
  *           if an I/O error occurs in the specified stream.
  * @throws IllegalArgumentException
- *           if <code>(blockSize < 1) || (blockSize > 9)</code>.
+ *           if {@code (blockSize < 1) || (blockSize > 9)}
  * @throws NullPointerException
- *           if <code>out == null</code>.
+ *           if {@code out == null}.
  *
  * @see #MIN_BLOCKSIZE
  * @see #MAX_BLOCKSIZE
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java
index b4c6659..896d35e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java
@@ -404,7 +404,7 @@ public class BuiltInGzipDecompressor implements Decompressor {
   /**
    * Returns the total number of compressed bytes input so far, including
-   * gzip header/trailer bytes.</p>
+   * gzip header/trailer bytes.
    *
    * @return the total (non-negative) number of compressed bytes read so far
    */
@@ -420,7 +420,7 @@ public class BuiltInGzipDecompressor implements Decompressor {
    * non-zero value unless called after {@link #setInput(byte[] b, int off,
    * int len)} and before {@link #decompress(byte[] b, int off, int len)}.
    * (That is, after {@link #decompress(byte[] b, int off, int len)} it
-   * always returns zero, except in finished state with concatenated data.)</p>
+   * always returns zero, except in finished state with concatenated data.)
    *
    * @return the total (non-negative) number of unprocessed bytes in input
    */
@@ -441,7 +441,7 @@ public class BuiltInGzipDecompressor implements Decompressor {
   /**
    * Returns true if the end of the gzip substream (single "member") has been
-   * reached.</p>
+   * reached.
    */
   @Override
   public synchronized boolean finished() {
@@ -450,7 +450,7 @@ public class BuiltInGzipDecompressor implements Decompressor {
   /**
    * Resets everything, including the input buffer, regardless of whether the
-   * current gzip substream is finished.</p>
+   * current gzip substream is finished.
    */
   @Override
   public synchronized void reset() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
index 438c8be..da8a90b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
@@ -435,7 +435,7 @@ public class ZlibCompressor implements Compressor {
   }
   /**
-   * Returns the total number of uncompressed bytes input so far.</p>
+   * Returns the total number of uncompressed bytes input so far.
    *
    * @return the total (non-negative) number of uncompressed bytes input so far
    */
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java
index dd550b9..f642d77 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java
@@ -243,7 +243,7 @@ public class ZlibDecompressor implements Decompressor {
   }
   /**
-   * Returns the total number of compressed bytes input so far.</p>
+   * Returns the total number of compressed bytes input so far.
    *
    * @return the total (non-negative) number of compressed bytes input so far
    */
@@ -255,7 +255,7 @@ public class ZlibDecompressor implements Decompressor {
   /**
    * Returns the number of bytes remaining in the input buffers; normally
    * called when finished() is true to determine amount of post-gzip-stream
-   * data.</p>
+   * data.
    *
    * @return the total (non-negative) number of unprocessed bytes in input
    */
@@ -266,7 +266,7 @@ public class ZlibDecompressor implements Decompressor {
   }
   /**
-   * Resets everything including the input buffers (user and direct).</p>
+   * Resets everything including the input buffers (user and direct).
    */
   @Override
   public void reset() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java
index 93b3b6d..07afbab 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java
@@ -73,11 +73,11 @@ public class ZlibFactory {
     ZlibFactory.nativeZlibLoaded = isLoaded;
   }
   /**
-   * Check if native-zlib code is loaded & initialized correctly and
+   * Check if native-zlib code is loaded &amp; initialized correctly and
    * can be loaded for this job.
    *
    * @param conf configuration
-   * @return <code>true</code> if native-zlib is loaded & initialized
+   * @return <code>true</code> if native-zlib is loaded &amp; initialized
    *         and can be loaded for this job, else <code>false</code>
    */
   public static boolean isNativeZlibLoaded(Configuration conf) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
index 8ec0e72..5ba6e9c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
@@ -36,7 +36,7 @@ import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
 /**
- * A codec & coder utility to help create coders conveniently.
+ * A codec &amp; coder utility to help create coders conveniently.
  *
  * {@link CodecUtil} includes erasure coder configurations key and default
  * values such as coder class name and erasure codec option values included
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/util/GaloisField.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/util/GaloisField.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/util/GaloisField.java
index fdb47be..f80fcec 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/util/GaloisField.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/util/GaloisField.java
@@ -518,7 +518,7 @@ public class GaloisField {
   /**
    * Perform Gaussian elimination on the given matrix. This matrix has to be a
-   * fat matrix (number of rows > number of columns).
+   * fat matrix (number of rows &gt; number of columns).
    */
   public void gaussianElimination(int[][] matrix) {
     assert(matrix != null && matrix.length > 0 && matrix[0].length > 0
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java
index c63baa5..09cd282 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java
@@ -1308,11 +1308,11 @@ public class TFile {
      * @param reader
      *          The TFile reader object.
      * @param beginKey
-     *          Begin key of the scan. If null, scan from the first <K,V>
-     *          entry of the TFile.
+     *          Begin key of the scan. If null, scan from the first
+     *          &lt;K, V&gt; entry of the TFile.
      * @param endKey
-     *          End key of the scan. If null, scan up to the last <K, V> entry
-     *          of the TFile.
+     *          End key of the scan. If null, scan up to the last &lt;K, V&gt;
+     *          entry of the TFile.
      * @throws IOException
      */
     protected Scanner(Reader reader, RawComparable beginKey,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Utils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Utils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Utils.java
index 8cb6e0d..17a27f1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Utils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Utils.java
@@ -62,27 +62,33 @@ public final class Utils {
  * <li>if n in [-32, 127): encode in one byte with the actual value.
  * Otherwise,
  * <li>if n in [-20*2^8, 20*2^8): encode in two bytes: byte[0] = n/256 - 52;
- * byte[1]=n&0xff. Otherwise,
+ * byte[1]=n&amp;0xff. Otherwise,
  * <li>if n IN [-16*2^16, 16*2^16): encode in three bytes: byte[0]=n/2^16 -
- * 88; byte[1]=(n>>8)&0xff; byte[2]=n&0xff. Otherwise,
+ * 88; byte[1]=(n&gt;&gt;8)&amp;0xff; byte[2]=n&amp;0xff. Otherwise,
  * <li>if n in [-8*2^24, 8*2^24): encode in four bytes: byte[0]=n/2^24 - 112;
- * byte[1] = (n>>16)&0xff; byte[2] = (n>>8)&0xff; byte[3]=n&0xff. Otherwise:
+ * byte[1] = (n&gt;&gt;16)&amp;0xff; byte[2] = (n&gt;&gt;8)&amp;0xff;
+ * byte[3]=n&amp;0xff.
+ * Otherwise:
  * <li>if n in [-2^31, 2^31): encode in five bytes: byte[0]=-125; byte[1] =
- * (n>>24)&0xff; byte[2]=(n>>16)&0xff; byte[3]=(n>>8)&0xff; byte[4]=n&0xff;
+ * (n&gt;&gt;24)&amp;0xff; byte[2]=(n&gt;&gt;16)&amp;0xff;
+ * byte[3]=(n&gt;&gt;8)&amp;0xff; byte[4]=n&amp;0xff;
  * <li>if n in [-2^39, 2^39): encode in six bytes: byte[0]=-124; byte[1] =
- * (n>>32)&0xff; byte[2]=(n>>24)&0xff; byte[3]=(n>>16)&0xff;
- * byte[4]=(n>>8)&0xff; byte[5]=n&0xff
+ * (n&gt;&gt;32)&amp;0xff; byte[2]=(n&gt;&gt;24)&amp;0xff;
+ * byte[3]=(n&gt;&gt;16)&amp;0xff; byte[4]=(n&gt;&gt;8)&amp;0xff;
+ * byte[5]=n&amp;0xff
  * <li>if n in [-2^47, 2^47): encode in seven bytes: byte[0]=-123; byte[1] =
- * (n>>40)&0xff; byte[2]=(n>>32)&0xff; byte[3]=(n>>24)&0xff;
- * byte[4]=(n>>16)&0xff; byte[5]=(n>>8)&0xff; byte[6]=n&0xff;
+ * (n&gt;&gt;40)&amp;0xff; byte[2]=(n&gt;&gt;32)&amp;0xff;
+ * byte[3]=(n&gt;&gt;24)&amp;0xff; byte[4]=(n&gt;&gt;16)&amp;0xff;
+ * byte[5]=(n&gt;&gt;8)&amp;0xff; byte[6]=n&amp;0xff;
  * <li>if n in [-2^55, 2^55): encode in eight bytes: byte[0]=-122; byte[1] =
- * (n>>48)&0xff; byte[2] = (n>>40)&0xff; byte[3]=(n>>32)&0xff;
- * byte[4]=(n>>24)&0xff; byte[5]=(n>>16)&0xff; byte[6]=(n>>8)&0xff;
- * byte[7]=n&0xff;
+ * (n&gt;&gt;48)&amp;0xff; byte[2] = (n&gt;&gt;40)&amp;0xff;
+ * byte[3]=(n&gt;&gt;32)&amp;0xff; byte[4]=(n&gt;&gt;24)&amp;0xff; byte[5]=
+ * (n&gt;&gt;16)&amp;0xff; byte[6]=(n&gt;&gt;8)&amp;0xff; byte[7]=n&amp;0xff;
  * <li>if n in [-2^63, 2^63): encode in nine bytes: byte[0]=-121; byte[1] =
- * (n>>54)&0xff; byte[2] = (n>>48)&0xff; byte[3] = (n>>40)&0xff;
- * byte[4]=(n>>32)&0xff; byte[5]=(n>>24)&0xff; byte[6]=(n>>16)&0xff;
- * byte[7]=(n>>8)&0xff; byte[8]=n&0xff;
+ * (n&gt;&gt;54)&amp;0xff; byte[2] = (n&gt;&gt;48)&amp;0xff;
+ * byte[3] = (n&gt;&gt;40)&amp;0xff; byte[4]=(n&gt;&gt;32)&amp;0xff;
+ * byte[5]=(n&gt;&gt;24)&amp;0xff; byte[6]=(n&gt;&gt;16)&amp;0xff; byte[7]=
+ * (n&gt;&gt;8)&amp;0xff; byte[8]=n&amp;0xff;
  * </ul>
  *
  * @param out
@@ -181,15 +187,15 @@
  * Decoding the variable-length integer. Suppose the value of the first byte
  * is FB, and the following bytes are NB[*].
  * <ul>
- * <li>if (FB >= -32), return (long)FB;
- * <li>if (FB in [-72, -33]), return (FB+52)<<8 + NB[0]&0xff;
- * <li>if (FB in [-104, -73]), return (FB+88)<<16 + (NB[0]&0xff)<<8 +
- * NB[1]&0xff;
- * <li>if (FB in [-120, -105]), return (FB+112)<<24 + (NB[0]&0xff)<<16 +
- * (NB[1]&0xff)<<8 + NB[2]&0xff;
+ * <li>if (FB &gt;= -32), return (long)FB;
+ * <li>if (FB in [-72, -33]), return (FB+52)&lt;&lt;8 + NB[0]&amp;0xff;
+ * <li>if (FB in [-104, -73]), return (FB+88)&lt;&lt;16 +
+ * (NB[0]&amp;0xff)&lt;&lt;8 + NB[1]&amp;0xff;
+ * <li>if (FB in [-120, -105]), return (FB+112)&lt;&lt;24 + (NB[0]&amp;0xff)
+ * &lt;&lt;16 + (NB[1]&amp;0xff)&lt;&lt;8 + NB[2]&amp;0xff;
  * <li>if (FB in [-128, -121]), return interpret NB[FB+129] as a signed
  * big-endian integer.
- *
+ * </ul>
  * @param in
  *          input stream
  * @return the decoded long integer.
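The byte-level scheme in that javadoc is easy to verify for the two-byte case:
for n in [-20*2^8, 20*2^8), byte[0] = n/256 - 52 and byte[1] = n & 0xff, and
decoding computes (FB + 52) << 8 plus the low byte. A self-contained check
(not part of the patch):

public class TwoByteVIntCheck {
  public static void main(String[] args) {
    int n = 1000;
    byte b0 = (byte) (n / 256 - 52);  // 3 - 52 = -49, inside [-72, -33]
    byte b1 = (byte) (n & 0xff);      // low byte, 232
    int decoded = ((b0 + 52) << 8) + (b1 & 0xff);
    System.out.println(decoded);      // 1000
  }
}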
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryProxy.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryProxy.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryProxy.java
index 9875bcd..7fcd5fd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryProxy.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryProxy.java
@@ -89,12 +89,12 @@ public class RetryProxy {
  *
  * @param iface the interface that the retry will implement
  * @param proxyProvider provides implementation instances whose methods should be retried
- * @param methodNameToPolicyMapa map of method names to retry policies
+ * @param methodNameToPolicyMap map of method names to retry policies
  * @return the retry proxy
  */
 public static <T> Object create(Class<T> iface,
     FailoverProxyProvider<T> proxyProvider,
-    Map<String,RetryPolicy> methodNameToPolicyMap,
+    Map<String, RetryPolicy> methodNameToPolicyMap,
     RetryPolicy defaultPolicy) {
   return Proxy.newProxyInstance(
       proxyProvider.getInterface().getClassLoader(),
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/Deserializer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/Deserializer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/Deserializer.java
index 3b727d9..3c8dfcc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/Deserializer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/Deserializer.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 /**
  * <p>
- * Provides a facility for deserializing objects of type <T> from an
+ * Provides a facility for deserializing objects of type {@literal <T>} from an
  * {@link InputStream}.
  * </p>
  *
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/Serializer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/Serializer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/Serializer.java
index 63d3738..5ada541 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/Serializer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/Serializer.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 /**
  * <p>
- * Provides a facility for serializing objects of type <T> to an
+ * Provides a facility for serializing objects of type &lt;T&gt; to an
  * {@link OutputStream}.
* </p> * http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java ---------------------------------------------------------------------- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java index b156d1f..0be5939 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java @@ -131,7 +131,7 @@ public final class CallerContext { /** * The thread local current caller context. - * <p/> + * <p> * Internal class for defered singleton idiom. * https://en.wikipedia.org/wiki/Initialization_on_demand_holder_idiom */ http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java ---------------------------------------------------------------------- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java index 07a2f13..49f1e49 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java @@ -1357,7 +1357,7 @@ public class Client implements AutoCloseable { * @param remoteId - the target rpc server * @param fallbackToSimpleAuth - set to true or false during this method to * indicate if a secure client falls back to simple auth - * @returns the rpc response + * @return the rpc response * Throws exceptions if there are network problems or if the remote code * threw an exception. */ @@ -1392,7 +1392,7 @@ public class Client implements AutoCloseable { * @param serviceClass - service class for RPC * @param fallbackToSimpleAuth - set to true or false during this method to * indicate if a secure client falls back to simple auth - * @returns the rpc response + * @return the rpc response * Throws exceptions if there are network problems or if the remote code * threw an exception. */ @@ -1461,7 +1461,7 @@ public class Client implements AutoCloseable { /** * Check if RPC is in asynchronous mode or not. * - * @returns true, if RPC is in asynchronous mode, otherwise false for + * @return true, if RPC is in asynchronous mode, otherwise false for * synchronous mode. */ @Unstable @@ -1575,7 +1575,8 @@ public class Client implements AutoCloseable { /** * This class holds the address and the user ticket. 
The client connections - * to servers are uniquely identified by <remoteAddress, protocol, ticket> + * to servers are uniquely identified by {@literal <}remoteAddress, protocol, + * ticket{@literal >} */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) @InterfaceStability.Evolving http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ClientCache.java ---------------------------------------------------------------------- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ClientCache.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ClientCache.java index d0fb8fd..00d9a79 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ClientCache.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ClientCache.java @@ -37,7 +37,7 @@ public class ClientCache { new HashMap<SocketFactory, Client>(); /** - * Construct & cache an IPC client with the user-provided SocketFactory + * Construct & cache an IPC client with the user-provided SocketFactory * if no cached client exists. * * @param conf Configuration @@ -66,7 +66,7 @@ public class ClientCache { } /** - * Construct & cache an IPC client with the default SocketFactory + * Construct & cache an IPC client with the default SocketFactory * and default valueClass if no cached client exists. * * @param conf Configuration @@ -77,7 +77,7 @@ public class ClientCache { } /** - * Construct & cache an IPC client with the user-provided SocketFactory + * Construct & cache an IPC client with the user-provided SocketFactory * if no cached client exists. Default response type is ObjectWritable. * * @param conf Configuration http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java ---------------------------------------------------------------------- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java index 8bb0ce4..512b0b7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java @@ -76,7 +76,8 @@ public class DecayRpcScheduler implements RpcScheduler, /** * Decay factor controls how much each count is suppressed by on each sweep. - * Valid numbers are > 0 and < 1. Decay factor works in tandem with period + * Valid numbers are > 0 and < 1. Decay factor works in tandem with + * period * to control how long the scheduler remembers an identity. 
*/ public static final String IPC_SCHEDULER_DECAYSCHEDULER_FACTOR_KEY = http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshHandler.java ---------------------------------------------------------------------- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshHandler.java index 3fe9eb7..3622d2c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshHandler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshHandler.java @@ -28,7 +28,6 @@ public interface RefreshHandler { * Implement this method to accept refresh requests from the administrator. * @param identifier is the identifier you registered earlier * @param args contains a list of string args from the administrator - * @throws Exception as a shorthand for a RefreshResponse(-1, message) * @return a RefreshResponse */ RefreshResponse handleRefresh(String identifier, String[] args); http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java ---------------------------------------------------------------------- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java index 620e100..36e280f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java @@ -102,7 +102,7 @@ public class RemoteException extends IOException { * a <code>String</code> as a parameter. * Otherwise it returns this. * - * @return <code>Throwable + * @return <code>Throwable</code> */ public IOException unwrapRemoteException() { try { http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java ---------------------------------------------------------------------- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index 76d9c40..d162d2f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -2747,7 +2747,7 @@ public abstract class Server { /** * Constructs a server listening on the named port and address. Parameters passed must - * be of the named class. The <code>handlerCount</handlerCount> determines + * be of the named class. The <code>handlerCount</code> determines * the number of handler threads that will be used to process calls. * If queueSizePerHandler or numReaders are not -1 they will be used instead of parameters * from configuration. Otherwise the configuration will be picked up. 
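The RefreshHandler hunk above drops the note that throwing was shorthand for a RefreshResponse(-1, message); under that reading, a handler reports failure through its return value rather than an exception. A minimal sketch of an implementation in that style follows; the identifier and messages are invented for illustration, and the RefreshResponse(int, String) constructor form is taken from the removed doc line.

    import org.apache.hadoop.ipc.RefreshHandler;
    import org.apache.hadoop.ipc.RefreshResponse;

    public class LogLevelRefreshHandler implements RefreshHandler {
      @Override
      public RefreshResponse handleRefresh(String identifier, String[] args) {
        if (args.length == 0) {
          // Failure is reported via the return code, not an exception.
          return new RefreshResponse(-1, "no arguments given for " + identifier);
        }
        // ... apply the refreshed setting here ...
        return new RefreshResponse(0, "refreshed " + identifier);
      }
    }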
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java ---------------------------------------------------------------------- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java index 093d0af..c404ebe 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java @@ -70,7 +70,7 @@ import java.util.Set; * <p> * The optional <code>get</code> parameter is used to query an specific * attribute of a JMX bean. The format of the URL is - * <code>http://.../jmx?get=MXBeanName::AttributeName<code> + * <code>http://.../jmx?get=MXBeanName::AttributeName</code> * <p> * For example * <code> @@ -85,7 +85,7 @@ import java.util.Set; * <p> * The return format is JSON and in the form * <p> - * <code><pre> + * <pre><code> * { * "beans" : [ * { @@ -94,7 +94,7 @@ import java.util.Set; * } * ] * } - * </pre></code> + * </code></pre> * <p> * The servlet attempts to convert the the JMXBeans into JSON. Each * bean's attributes will be converted to a JSON object member. http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogThrottlingHelper.java ---------------------------------------------------------------------- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogThrottlingHelper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogThrottlingHelper.java index aa4e61c..41bee04 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogThrottlingHelper.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogThrottlingHelper.java @@ -62,10 +62,10 @@ import org.apache.hadoop.util.Timer; * still maintaining overall information about how many large requests were * received. * - * <p/>This class can also be used to coordinate multiple logging points; see + * <p>This class can also be used to coordinate multiple logging points; see * {@link #record(String, long, double...)} for more details. * - * <p/>This class is not thread-safe. + * <p>This class is not thread-safe. */ public class LogThrottlingHelper { @@ -175,7 +175,7 @@ public class LogThrottlingHelper { * about the values specified since the last time the caller was expected to * write to its log. * - * <p/>Specifying multiple values will maintain separate summary statistics + * <p>Specifying multiple values will maintain separate summary statistics * about each value. For example: * <pre>{@code * helper.record(1, 0); @@ -230,7 +230,7 @@ public class LogThrottlingHelper { * iteration as "pre", yet each one is able to maintain its own summary * information. * - * <p/>Other behavior is the same as {@link #record(double...)}. + * <p>Other behavior is the same as {@link #record(double...)}. * * @param recorderName The name of the recorder. This is used to check if the * current recorder is the primary. 
Other names are http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java ---------------------------------------------------------------------- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java index 22c288a..6803d11 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java @@ -251,7 +251,7 @@ public class MutableRollingAverages extends MutableMetric implements Closeable { } /** - * Retrieve a map of metric name -> (aggregate). + * Retrieve a map of metric name {@literal ->} (aggregate). * Filter out entries that don't have at least minSamples. * * @return a map of peer DataNode Id to the average latency to that http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java ---------------------------------------------------------------------- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java index ff7cd25..8fd3b33 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java @@ -87,6 +87,7 @@ <h2><a name="gettingstarted">Getting started</a></h2> <h3>Implementing metrics sources</h3> <table width="99%" border="1" cellspacing="0" cellpadding="4"> + <caption></caption> <tbody> <tr> <th>Using annotations</th><th>Using MetricsSource interface</th> @@ -289,6 +290,7 @@ backend that can handle multiple contexts (file, gangalia etc.): </p> <table width="99%" border="1" cellspacing="0" cellpadding="4"> + <caption></caption> <tbody> <tr> <th width="40%">Before</th><th>After</th> @@ -310,6 +312,7 @@ using the context option in the sink options like the following: </p> <table width="99%" border="1" cellspacing="0" cellpadding="4"> + <caption></caption> <tbody> <tr> <th width="40%">Before</th><th>After</th> http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java ---------------------------------------------------------------------- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java index 92ac952..1d330c7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java @@ -111,7 +111,7 @@ import org.apache.hadoop.security.UserGroupInformation; * <i>unknown</i>.</p> * * <p>Instead of appending to an existing file, by default the sink - * will create a new file with a suffix of ".<n>&quet;, where + * will create a new file with a suffix of ".<n>", where * <i>n</i> is the next lowest integer that 
isn't already used in a file name, * similar to the Hadoop daemon logs. NOTE: the file with the <b>highest</b> * sequence number is the <b>newest</b> file, unlike the Hadoop daemon logs.</p> http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/StatsDSink.java ---------------------------------------------------------------------- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/StatsDSink.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/StatsDSink.java index b2be0a2..c1dbf7e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/StatsDSink.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/StatsDSink.java @@ -47,10 +47,10 @@ import org.slf4j.LoggerFactory; * a daemon that is running on the localhost and will add the * hostname to the metric (such as the * <a href="https://collectd.org/">CollectD</a> StatsD plugin). - * <br/> + * <br> * To configure this plugin, you will need to add the following * entries to your hadoop-metrics2.properties file: - * <br/> + * <br> * <pre> * *.sink.statsd.class=org.apache.hadoop.metrics2.sink.StatsDSink * [prefix].sink.statsd.server.host= http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java ---------------------------------------------------------------------- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java index 916367f..1b50498 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java @@ -59,8 +59,9 @@ public final class MBeans { /** * Register the MBean using our standard MBeanName format - * "hadoop:service=<serviceName>,name=<nameName>" - * Where the <serviceName> and <nameName> are the supplied parameters. + * "hadoop:service={@literal <serviceName>,name=<nameName>}" + * Where the {@literal <serviceName> and <nameName>} are the supplied + * parameters. * * @param serviceName * @param nameName @@ -75,8 +76,9 @@ public final class MBeans { /** * Register the MBean using our standard MBeanName format - * "hadoop:service=<serviceName>,name=<nameName>" - * Where the <serviceName> and <nameName> are the supplied parameters. + * "hadoop:service={@literal <serviceName>,name=<nameName>}" + * Where the {@literal <serviceName> and <nameName>} are the supplied + * parameters. 
* * @param serviceName * @param nameName http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/AbstractDNSToSwitchMapping.java ---------------------------------------------------------------------- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/AbstractDNSToSwitchMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/AbstractDNSToSwitchMapping.java index b2d803c..97723c4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/AbstractDNSToSwitchMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/AbstractDNSToSwitchMapping.java @@ -29,11 +29,11 @@ import java.util.Map; import java.util.Set; /** - * This is a base class for DNS to Switch mappings. <p/> It is not mandatory to + * This is a base class for DNS to Switch mappings. <p> It is not mandatory to * derive {@link DNSToSwitchMapping} implementations from it, but it is strongly * recommended, as it makes it easy for the Hadoop developers to add new methods * to this base class that are automatically picked up by all implementations. - * <p/> + * <p> * * This class does not extend the <code>Configured</code> * base class, and should not be changed to do so, as it causes problems @@ -81,7 +81,7 @@ public abstract class AbstractDNSToSwitchMapping * multi-rack. Subclasses may override this with methods that are more aware * of their topologies. * - * <p/> + * <p> * * This method is used when parts of Hadoop need know whether to apply * single rack vs multi-rack policies, such as during block placement. @@ -140,7 +140,7 @@ public abstract class AbstractDNSToSwitchMapping /** * Query for a {@link DNSToSwitchMapping} instance being on a single * switch. - * <p/> + * <p> * This predicate simply assumes that all mappings not derived from * this class are multi-switch. * @param mapping the mapping to query http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java ---------------------------------------------------------------------- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java index 2fb4d3e..061971c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java @@ -141,7 +141,7 @@ public class DNS { } /** - * Like {@link DNS#getIPs(String, boolean), but returns all + * Like {@link DNS#getIPs(String, boolean)}, but returns all * IPs associated with the given interface and its subinterfaces. 
*/ public static String[] getIPs(String strInterface) http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java ---------------------------------------------------------------------- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java index 7b1b332..1e6f5f5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java @@ -40,7 +40,7 @@ public interface DNSToSwitchMapping { * Note the hostname/ip-address is not part of the returned path. * The network topology of the cluster would determine the number of * components in the network path. - * <p/> + * <p> * * If a name cannot be resolved to a rack, the implementation * should return {@link NetworkTopology#DEFAULT_RACK}. This http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java ---------------------------------------------------------------------- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java index 0f9cfc3..ceb8ec2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java @@ -146,8 +146,8 @@ public class NetUtils { /** * Util method to build socket addr from either: - * <host>:<port> - * <fs>://<host>:<port>/<path> + * {@literal <host>:<port>} + * {@literal <fs>://<host>:<port>/<path>} */ public static InetSocketAddress createSocketAddr(String target) { return createSocketAddr(target, -1); @@ -155,9 +155,9 @@ public class NetUtils { /** * Util method to build socket addr from either: - * <host> - * <host>:<port> - * <fs>://<host>:<port>/<path> + * {@literal <host>} + * {@literal <host>:<port>} + * {@literal <fs>://<host>:<port>/<path>} */ public static InetSocketAddress createSocketAddr(String target, int defaultPort) { @@ -938,7 +938,7 @@ public class NetUtils { * Return a free port number. There is no guarantee it will remain free, so * it should be used immediately. 
* - * @returns A free port for binding a local socket + * @return A free port for binding a local socket */ public static int getFreeSocketPort() { int port = 0; @@ -959,7 +959,7 @@ public class NetUtils { * * @param localAddr * @param bindWildCardAddress - * @returns InetAddress + * @return InetAddress */ public static InetAddress bindToLocalAddress(InetAddress localAddr, boolean bindWildCardAddress) { http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java ---------------------------------------------------------------------- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java index 1f077a7..4d4f2bf 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java @@ -114,7 +114,7 @@ public class NetworkTopology { } /** Add a leaf node - * Update node counter & rack counter if necessary + * Update node counter &amp; rack counter if necessary * @param node node to be added; can be null * @exception IllegalArgumentException if add a node to a leave or node to be added is not a leaf @@ -858,12 +858,12 @@ public class NetworkTopology { /** * Sort nodes array by network distance to <i>reader</i>. - * <p/> + * <p> * In a three-level topology, a node can be either local, on the same rack, * or on a different rack from the reader. Sorting the nodes based on network * distance from the reader reduces network traffic and improves * performance. - * <p/> + * <p> * As an additional twist, we also randomize the nodes at each network * distance. This helps with load balancing when there is data skew. * @@ -881,11 +881,11 @@ public class NetworkTopology { /** * Sort nodes array by network distance to <i>reader</i>. - * <p/> using network location. This is used when the reader + * <p> using network location. This is used when the reader * is not a datanode. Sorting the nodes based on network distance * from the reader reduces network traffic and improves * performance. - * <p/> + * <p> * * @param reader Node where data will be read * @param nodes Available replicas with the requested data @@ -902,7 +902,7 @@ public class NetworkTopology { /** * Sort nodes array by network distance to <i>reader</i>. - * <p/> + * <p> * As an additional twist, we also randomize the nodes at each network * distance. This helps with load balancing when there is data skew. *
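Finally, the NetUtils hunks above only touch the address-format and @return tags; the method signatures they document are unchanged. A small usage sketch of those documented signatures follows; the host and port are invented, and as the Javadoc itself warns, the "free" port is only guaranteed free at the instant it is returned.

    import java.net.InetSocketAddress;
    import org.apache.hadoop.net.NetUtils;

    public class NetUtilsDemo {
      public static void main(String[] args) {
        // Both documented target forms resolve to a host plus port.
        InetSocketAddress plain =
            NetUtils.createSocketAddr("namenode.example.com:8020");
        InetSocketAddress uri =
            NetUtils.createSocketAddr("hdfs://namenode.example.com:8020/", -1);

        // Free now, but another process may grab it before we bind.
        int scratch = NetUtils.getFreeSocketPort();
        System.out.println(plain + " " + uri + " " + scratch);
      }
    }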
