This is an automated email from the ASF dual-hosted git repository.

myskov pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 70e6e40a3c HDDS-11122. Fix javadoc warnings (#7234)
70e6e40a3c is described below

commit 70e6e40a3cd541b64cb9c8f53b6ea78f177ed78c
Author: Daniil Prizva <[email protected]>
AuthorDate: Thu Sep 26 09:23:00 2024 +0300

    HDDS-11122. Fix javadoc warnings (#7234)
---
 .../hadoop/hdds/scm/storage/BlockDataStreamOutput.java  |  1 -
 .../hadoop/ozone/client/io/BlockInputStreamFactory.java |  1 -
 .../ozone/client/io/BlockInputStreamFactoryImpl.java    |  1 -
 .../ozone/client/io/ECBlockInputStreamFactory.java      |  1 -
 .../ozone/client/io/ECBlockInputStreamFactoryImpl.java  |  1 -
 .../io/ECBlockReconstructedStripeInputStream.java       |  2 +-
 .../hadoop/hdds/annotation/InterfaceStability.java      |  6 +++---
 .../org/apache/hadoop/hdds/recon/ReconConfigKeys.java   |  6 ++++--
 .../main/java/org/apache/hadoop/hdds/scm/ScmConfig.java |  3 +--
 .../org/apache/hadoop/hdds/scm/client/ScmClient.java    |  2 +-
 .../scm/protocol/StorageContainerLocationProtocol.java  |  2 +-
 .../main/java/org/apache/hadoop/ozone/OzoneConsts.java  |  2 +-
 .../org/apache/hadoop/ozone/common/ChunkBuffer.java     |  2 +-
 .../ozone/upgrade/LayoutVersionInstanceFactory.java     |  8 +++-----
 .../ozone/container/common/impl/ContainerSet.java       |  1 -
 .../common/states/endpoint/HeartbeatEndpointTask.java   |  2 +-
 .../common/states/endpoint/RegisterEndpointTask.java    |  2 +-
 .../ozone/container/common/volume/HddsVolume.java       |  2 +-
 .../ozone/container/common/volume/VolumeInfo.java       | 17 ++++++++++++-----
 .../ozone/container/common/volume/VolumeUsage.java      |  4 ++++
 .../keyvalue/helpers/KeyValueContainerUtil.java         |  5 ++---
 .../ozone/container/ozoneimpl/ContainerController.java  |  1 -
 ...ContainerDatanodeProtocolClientSideTranslatorPB.java |  1 -
 .../ScmBlockLocationProtocolClientSideTranslatorPB.java |  2 +-
 .../x509/certificate/authority/DefaultApprover.java     |  2 +-
 .../x509/certificate/authority/DefaultCAServer.java     |  2 --
 .../apache/hadoop/hdds/server/http/ProfileServlet.java  |  8 +++++---
 .../org/apache/hadoop/hdds/utils/HddsServerUtil.java    |  2 +-
 .../java/org/apache/hadoop/hdds/utils/db/Table.java     |  1 -
 .../org/apache/hadoop/ozone/audit/package-info.java     |  2 +-
 .../hadoop/hdds/scm/SCMCommonPlacementPolicy.java       |  8 ++++----
 .../apache/hadoop/hdds/scm/block/DeletedBlockLog.java   |  2 +-
 .../hadoop/hdds/scm/container/ContainerManager.java     |  1 -
 .../scm/container/placement/metrics/SCMNodeMetric.java  |  3 +--
 .../org/apache/hadoop/hdds/scm/node/NodeManager.java    |  2 --
 .../apache/hadoop/hdds/scm/node/NodeManagerMXBean.java  |  2 +-
 .../apache/hadoop/hdds/scm/node/NodeStateManager.java   |  4 ++--
 .../hadoop/hdds/scm/pipeline/PipelineStateManager.java  |  1 -
 .../hadoop/hdds/scm/security/RootCARotationManager.java |  6 ++++--
 .../hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java |  4 ++--
 .../hadoop/hdds/scm/server/SCMHTTPServerConfig.java     |  7 ++++---
 .../org/apache/hadoop/ozone/client/ObjectStore.java     |  2 +-
 .../src/main/java/org/apache/hadoop/ozone/OFSPath.java  |  2 +-
 .../ozone/om/exceptions/OMLeaderNotReadyException.java  |  2 +-
 .../org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java   |  1 -
 .../hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java      |  2 +-
 .../apache/hadoop/ozone/om/helpers/SnapshotInfo.java    |  2 +-
 .../apache/hadoop/ozone/security/acl/OzoneObjInfo.java  |  2 +-
 .../org/apache/hadoop/ozone/om/OMMetadataManager.java   |  2 +-
 .../java/org/apache/hadoop/ozone/om/KeyManager.java     |  1 -
 .../java/org/apache/hadoop/ozone/om/ListIterator.java   |  2 +-
 .../apache/hadoop/ozone/om/OMMultiTenantManager.java    |  2 +-
 .../hadoop/ozone/om/OMMultiTenantManagerImpl.java       |  1 -
 .../java/org/apache/hadoop/ozone/om/OzoneManager.java   |  5 +++--
 .../hadoop/ozone/om/multitenant/AuthorizerLock.java     |  2 +-
 .../apache/hadoop/ozone/om/request/OMClientRequest.java |  1 -
 .../hadoop/ozone/om/request/key/OMKeyCommitRequest.java |  1 -
 .../hadoop/ozone/om/request/key/OMKeyRequest.java       |  2 +-
 .../hadoop/ozone/om/request/volume/OMVolumeRequest.java |  1 -
 .../apache/hadoop/ozone/om/snapshot/SnapshotUtils.java  |  2 +-
 .../org/apache/hadoop/fs/ozone/OzoneFSInputStream.java  |  1 -
 .../java/org/apache/hadoop/ozone/recon/ReconUtils.java  |  4 ++--
 .../hadoop/ozone/recon/api/ContainerEndpoint.java       |  2 +-
 .../hadoop/ozone/recon/api/OMDBInsightEndpoint.java     |  5 ++---
 .../ozone/recon/spi/ReconContainerMetadataManager.java  |  2 +-
 .../spi/impl/ReconContainerMetadataManagerImpl.java     |  2 +-
 .../ozone/recon/tasks/OmUpdateEventValidator.java       |  2 --
 .../ozone/s3/signature/AWSSignatureProcessor.java       |  2 +-
 .../apache/hadoop/ozone/s3/signature/Credential.java    |  3 +--
 .../hadoop/ozone/admin/nssummary/NSSummaryAdmin.java    |  1 -
 .../hadoop/ozone/freon/HadoopDirTreeGenerator.java      |  2 +-
 .../org/apache/hadoop/ozone/fsck/ContainerMapper.java   |  2 +-
 72 files changed, 91 insertions(+), 104 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index d5423d4ec0..48c77f2c86 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -364,7 +364,6 @@ public class BlockDataStreamOutput implements 
ByteBufferStreamOutput {
    * it is a no op.
    * @param bufferFull flag indicating whether bufferFull condition is hit or
    *              its called as part flush/close
-   * @return minimum commit index replicated to all nodes
    * @throws IOException IOException in case watch gets timed out
    */
   public void watchForCommit(boolean bufferFull) throws IOException {
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java
index d347dee851..8287a5a78b 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java
@@ -43,7 +43,6 @@ public interface BlockInputStreamFactory {
    * @param blockInfo The blockInfo representing the block.
    * @param pipeline The pipeline to be used for reading the block
    * @param token The block Access Token
-   * @param verifyChecksum Whether to verify checksums or not.
    * @param xceiverFactory Factory to create the xceiver in the client
    * @param refreshFunction Function to refresh the block location if needed
    * @return BlockExtendedInputStream of the correct type.
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java
index 8a87234a77..d9cadc948a 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java
@@ -71,7 +71,6 @@ public class BlockInputStreamFactoryImpl implements 
BlockInputStreamFactory {
    * @param blockInfo The blockInfo representing the block.
    * @param pipeline The pipeline to be used for reading the block
    * @param token The block Access Token
-   * @param verifyChecksum Whether to verify checksums or not.
    * @param xceiverFactory Factory to create the xceiver in the client
    * @param refreshFunction Function to refresh the pipeline if needed
    * @return BlockExtendedInputStream of the correct type.
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java
index 66e7a31337..aca3cfed46 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java
@@ -45,7 +45,6 @@ public interface ECBlockInputStreamFactory {
    *                        know are bad and should not be used.
    * @param repConfig The replication Config
    * @param blockInfo The blockInfo representing the block.
-   * @param verifyChecksum Whether to verify checksums or not.
    * @param xceiverFactory Factory to create the xceiver in the client
    * @param refreshFunction Function to refresh the block location if needed
    * @return BlockExtendedInputStream of the correct type.
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java
index 01d0b0a7b7..41c46aad37 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java
@@ -68,7 +68,6 @@ public final class ECBlockInputStreamFactoryImpl implements
    *                        know are bad and should not be used.
    * @param repConfig The replication Config
    * @param blockInfo The blockInfo representing the block.
-   * @param verifyChecksum Whether to verify checksums or not.
    * @param xceiverFactory Factory to create the xceiver in the client
    * @param refreshFunction Function to refresh the pipeline if needed
    * @return BlockExtendedInputStream of the correct type.
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java
index 31f94e0aca..229cc3f3e3 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java
@@ -85,7 +85,7 @@ import static java.util.stream.IntStream.range;
  * Parity elements long. Missing or not needed elements should be set to null
  * in the array. The elements should be assigned to the array in EC index 
order.
  *
- * Assuming we have n missing data locations, where n <= parity locations, the
+ * Assuming we have n missing data locations, where n {@literal <=} parity 
locations, the
  * ByteBuffers passed in from the client are either assigned to the decoder
  * input array, or they are assigned to the decoder output array, where
  * reconstructed data is written. The required number of parity buffers will be
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceStability.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceStability.java
index 4251344139..6e9ee94679 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceStability.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceStability.java
@@ -27,9 +27,9 @@ import java.lang.annotation.RetentionPolicy;
  * class or method not changing over time. Currently the stability can be
  * {@link Stable}, {@link Evolving} or {@link Unstable}. <br>
  *
- * <ul><li>All classes that are annotated with {@link Public} or
- * {@link LimitedPrivate} must have InterfaceStability annotation. </li>
- * <li>Classes that are {@link Private} are to be considered unstable unless
+ * <ul><li>All classes that are annotated with {@link 
InterfaceAudience.Public} or
+ * {@link InterfaceAudience.LimitedPrivate} must have InterfaceStability 
annotation. </li>
+ * <li>Classes that are {@link InterfaceAudience.Private} are to be considered 
unstable unless
  * a different InterfaceStability annotation states otherwise.</li>
  * <li>Incompatible changes must not be made to classes marked as stable.</li>
  * </ul>
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java
index 3ed9f4e58e..eb6142ea67 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java
@@ -20,11 +20,13 @@ package org.apache.hadoop.hdds.recon;
 
 /**
  * This class contains constants for Recon related configuration keys used in
- * SCM & Datanode.
+ * SCM and Datanode.
  */
 public final class ReconConfigKeys {
 
   /**
+   * This class contains constants for Recon related configuration keys used in
+   * SCM and Datanode.
    * Never constructed.
    */
   private ReconConfigKeys() {
@@ -71,7 +73,7 @@ public final class ReconConfigKeys {
    * Recon administrator users delimited by a comma.
    * This is the list of users who can access admin only information from 
recon.
    * Users defined in
-   * {@link org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS}
+   * {@link org.apache.hadoop.ozone.OzoneConfigKeys#OZONE_ADMINISTRATORS}
    * will always be able to access all recon information regardless of this
    * setting.
    */
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
index 3ef9317ced..dd78faf682 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
@@ -206,8 +206,7 @@ public class ScmConfig extends ReconfigurableConfig {
    * required for SCMSecurityProtocol where the KerberosInfo references
    * the old configuration with
    * the annotation shown below:-
-   * @KerberosInfo(serverPrincipal = ScmConfigKeys
-   *    .HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
+   * {@code @KerberosInfo(serverPrincipal = 
ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)}
    */
   public static class ConfigStrings {
     public static final String HDDS_SCM_KERBEROS_PRINCIPAL_KEY =
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
index 34b2680b30..1427fbf587 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -414,7 +414,7 @@ public interface ScmClient extends Closeable {
    * considered to be failed if it has been sent more than MAX_RETRY limit
    * and its count is reset to -1.
    *
-   * @param count Maximum num of returned transactions, if < 0. return all.
+   * @param count Maximum num of returned transactions, if {@literal < 0}. 
return all.
    * @param startTxId The least transaction id to start with.
    * @return a list of failed deleted block transactions.
    * @throws IOException
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index 45825cb2b6..f280e9bb6e 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -337,7 +337,7 @@ public interface StorageContainerLocationProtocol extends 
Closeable {
    * considered to be failed if it has been sent more than MAX_RETRY limit
    * and its count is reset to -1.
    *
-   * @param count Maximum num of returned transactions, if < 0. return all.
+   * @param count Maximum num of returned transactions, if {@literal < 0}. 
return all.
    * @param startTxId The least transaction id to start with.
    * @return a list of failed deleted block transactions.
    * @throws IOException
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index c6023ccd07..101507b502 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -363,7 +363,7 @@ public final class OzoneConsts {
    * contains illegal characters when creating/renaming key.
    *
    * Avoid the following characters in a key name:
-   * "\", "{", "}", "<", ">", "^", "%", "~", "#", "|", "`", "[", "]", Quotation
+   * {@literal "\", "{", "}", "<", ">", "^", "%", "~", "#", "|", "`", "[", 
"]"}, Quotation
    * marks and Non-printable ASCII characters (128–255 decimal characters).
    * https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html
    */
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java
index d3a558ca43..676216465b 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java
@@ -45,7 +45,7 @@ public interface ChunkBuffer extends UncheckedAutoCloseable {
    *
    * @param increment
    *   the increment size so that this buffer is allocated incrementally.
-   *   When increment <= 0, entire buffer is allocated in the beginning.
+   *   When increment {@literal <= 0}, entire buffer is allocated in the 
beginning.
    */
   static ChunkBuffer allocate(int capacity, int increment) {
     if (increment > 0 && increment < capacity) {
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java
index 83e63a2a32..76fc404361 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java
@@ -37,18 +37,16 @@ import com.google.common.annotations.VisibleForTesting;
 
 /**
  * Generic factory which stores different instances of Type 'T' sharded by
- * a key & version. A single key can be associated with different versions
+ * a key and version. A single key can be associated with different versions
  * of 'T'.
- *
  * Why does this class exist?
  * A typical use case during upgrade is to have multiple versions of a class
  * / method / object and chose them based  on current layout
  * version at runtime. Before finalizing, an older version is typically
  * needed, and after finalize, a newer version is needed. This class serves
  * this purpose in a generic way.
- *
  * For example, we can create a Factory to create multiple versions of
- * OMRequests sharded by Request Type & Layout Version Supported.
+ * OMRequests sharded by Request Type and Layout Version Supported.
  */
 public class LayoutVersionInstanceFactory<T> {
 
@@ -141,7 +139,7 @@ public class LayoutVersionInstanceFactory<T> {
    * From the list of versioned instances for a given "key", this
    * returns the "floor" value corresponding to the given version.
    * For example, if we have key = "CreateKey",  entry -> [(1, CreateKeyV1),
-   * (3, CreateKeyV2), and if the passed in key = CreateKey & version = 2, we
+   * (3, CreateKeyV2), and if the passed in key = CreateKey and version = 2, we
    * return CreateKeyV1.
    * Since this is a priority queue based implementation, we use a O(1) peek()
    * lookup to get the current valid version.
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
index b5dfd07d57..15cc6245dd 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
@@ -189,7 +189,6 @@ public class ContainerSet implements Iterable<Container<?>> 
{
    * Send FCR which will not contain removed containers.
    *
    * @param  context StateContext
-   * @return
    */
   public void handleVolumeFailures(StateContext context) {
     AtomicBoolean failedVolume = new AtomicBoolean(false);
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
index 44f0eae49e..25db14a101 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
@@ -489,7 +489,7 @@ public class HeartbeatEndpointTask
     /**
      * Sets the LayoutVersionManager.
      *
-     * @param versionMgr - config
+     * @param lvm config
      * @return Builder
      */
     public Builder setLayoutVersionManager(HDDSLayoutVersionManager lvm) {
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
index 71f95cc4d3..969756b40f 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
@@ -244,7 +244,7 @@ public final class RegisterEndpointTask implements
     /**
      * Sets the LayoutVersionManager.
      *
-     * @param versionMgr - config
+     * @param lvm config
      * @return Builder.
      */
     public Builder setLayoutVersionManager(HDDSLayoutVersionManager lvm) {
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
index b22b9148bb..c58aab2e5b 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
@@ -199,7 +199,7 @@ public class HddsVolume extends StorageVolume {
 
   /**
    * Delete all files under
-   * <volume>/hdds/<cluster-id>/tmp/deleted-containers.
+   * volume/hdds/cluster-id/tmp/deleted-containers.
    * This is the directory where containers are moved when they are deleted
    * from the system, but before being removed from the filesystem. This
    * makes the deletion atomic.
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
index af89026925..2172c07c3a 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
@@ -46,15 +46,17 @@ import org.slf4j.LoggerFactory;
  * - fsCapacity: reported total capacity from local fs.
  * - minVolumeFreeSpace (mvfs) : determines the free space for closing
      containers.This is like adding a few reserved bytes to reserved space.
-     Dn's will send close container action to SCM at this limit & it is
+     Dn's will send close container action to SCM at this limit, and it is
      configurable.
 
  *
- *
+ * <pre>
+ * {@code
  * |----used----|   (avail)   |++mvfs++|++++reserved+++++++|
  * |<-     capacity                  ->|
  *              |     fsAvail      |-------other-----------|
  * |<-                   fsCapacity                      ->|
+ * }</pre>
  *
  * What we could directly get from local fs:
  *     fsCapacity, fsAvail, (fsUsed = fsCapacity - fsAvail)
@@ -78,10 +80,12 @@ import org.slf4j.LoggerFactory;
  * then we should use DedicatedDiskSpaceUsage for
  * `hdds.datanode.du.factory.classname`,
  * Then it is much simpler, since we don't care about other usage:
- *
+ * <pre>
+ * {@code
  *  |----used----|             (avail)/fsAvail              |
  *  |<-              capacity/fsCapacity                  ->|
- *
+ *  }
+ * </pre>
  *  We have avail == fsAvail.
  */
 public final class VolumeInfo {
@@ -154,9 +158,12 @@ public final class VolumeInfo {
 
   /**
    * Calculate available space use method A.
+   * <pre>
+   * {@code
    * |----used----|   (avail)   |++++++++reserved++++++++|
    * |<-     capacity         ->|
-   *
+   * }
+   * </pre>
    * A) avail = capacity - used
    */
   public long getAvailable() {
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
index 7e138b0571..a91f0c1f72 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
@@ -77,11 +77,15 @@ public class VolumeUsage {
   }
 
   /**
+   * <pre>
+   * {@code
    * Calculate available space use method B.
    * |----used----|   (avail)   |++++++++reserved++++++++|
    *              |     fsAvail      |-------other-------|
    *                          ->|~~~~|<-
    *                      remainingReserved
+   * }
+   * </pre>
    * B) avail = fsAvail - Max(reserved - other, 0);
    */
   public SpaceUsageSource getCurrentUsage() {
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index b7d5b5fa59..691ccaa630 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -439,13 +439,13 @@ public final class KeyValueContainerUtil {
 
   /**
    * Moves container directory to a new location
-   * under "<volume>/hdds/<cluster-id>/tmp/deleted-containers"
+   * under "volume/hdds/cluster-id/tmp/deleted-containers"
    * and updates metadata and chunks path.
    * Containers will be moved under it before getting deleted
    * to avoid, in case of failure, having artifact leftovers
    * on the default container path on the disk.
    *
-   * Delete operation for Schema < V3
+   * Delete operation for Schema &lt; V3
    * 1. Container is marked DELETED
    * 2. Container is removed from memory container set
    * 3. Container DB handler from cache is removed and closed
@@ -460,7 +460,6 @@ public final class KeyValueContainerUtil {
    * 5. Container is deleted from tmp directory.
    *
    * @param keyValueContainerData
-   * @return true if renaming was successful
    */
   public static void moveToDeletedContainerDir(
       KeyValueContainerData keyValueContainerData,
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
index ecc4e80b4b..0db98a01d8 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
@@ -163,7 +163,6 @@ public class ContainerController {
    * Returns the Container given a container id.
    *
    * @param containerId ID of the container
-   * @return Container
    */
   public void addFinalizedBlock(final long containerId,
       final long localId) {
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
index eeb99b5a3d..d6b44f2a64 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
@@ -122,7 +122,6 @@ public class 
StorageContainerDatanodeProtocolClientSideTranslatorPB
   /**
    * Returns SCM version.
    *
-   * @param unused - set to null and unused.
    * @return Version info.
    */
   @Override
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
index 1f114304cc..d9b198d4b1 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
@@ -307,7 +307,7 @@ public final class 
ScmBlockLocationProtocolClientSideTranslatorPB
   }
   /**
    * Sort the datanodes based on distance from client.
-   * @return List<DatanodeDetails></>
+   * @return list of datanodes.
    * @throws IOException
    */
   @Override
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java
index b2d62443b7..5a39d0f1dd 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java
@@ -104,7 +104,7 @@ public class DefaultApprover implements CertificateApprover 
{
    * @param certSerialId - the new certificate id.
    * @return Signed Certificate.
    * @throws IOException - On Error
-   * @throws OperatorCreationException - on Error.
+   * @throws CertificateException - on Error.
    */
   @SuppressWarnings("ParameterNumber")
   @Override
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
index a93bdb4e3d..118aa82601 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
@@ -195,8 +195,6 @@ public class DefaultCAServer implements CertificateServer {
    *
    * @param certSerialId         - Certificate for this CA.
    * @return X509Certificate
-   * @throws CertificateException - usually thrown if this CA is not
-   * initialized.
    * @throws IOException - on Error.
    */
   @Override
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java
index f4f188aaf3..50925fcac7 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java
@@ -41,11 +41,12 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
+ * <pre>
  * Servlet that runs async-profiler as web-endpoint.
- * <p>
+ *
  * Source: https://github.com/apache/hive/blob/master/common/src/java/org
  * /apache/hive/http/ProfileServlet.java
- * <p>
+ *
  * Following options from async-profiler can be specified as query parameter.
  * //  -e event          profiling event: cpu|alloc|lock|cache-misses etc.
  * //  -d duration       run profiling for <duration> seconds
@@ -79,7 +80,7 @@ import org.slf4j.LoggerFactory;
  * curl "http://localhost:10002/prof";
  * - To collect 1 minute CPU profile of current process and output in tree
  * format (html)
- * curl "http://localhost:10002/prof?output=tree&duration=60";
+ * curl {@literal "http://localhost:10002/prof?output=tree&duration=60"}
  * - To collect 30 second heap allocation profile of current process (returns
  * FlameGraph svg)
  * curl "http://localhost:10002/prof?event=alloc";
@@ -111,6 +112,7 @@ import org.slf4j.LoggerFactory;
  * The default output format of the newest async profiler is HTML.
  * If the user is using an older version such as 1.5, HTML is not supported.
  * Please specify the corresponding output format.
+ * </pre>
  */
 public class ProfileServlet extends HttpServlet {
   private static final long serialVersionUID = 1L;
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
index c45e772c24..41fea63d20 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
@@ -129,7 +129,7 @@ public final class HddsServerUtil {
    * @param conf configuration
    * @param protocol Protocol interface
    * @param service service that implements the protocol
-   * @param server RPC server to which the protocol & implementation is added 
to
+   * @param server RPC server to which the protocol and implementation are 
added
    */
   public static void addPBProtocol(Configuration conf, Class<?> protocol,
       BlockingService service, RPC.Server server) throws IOException {
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
index f153823db7..5cfdcdb8a0 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
@@ -308,7 +308,6 @@ public interface Table<KEY, VALUE> extends AutoCloseable {
    * as part of a batch operation.
    * @param batch
    * @param prefix
-   * @return
    */
   void deleteBatchWithPrefix(BatchOperation batch, KEY prefix)
       throws IOException;
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
index 0c1ec710d2..c428f2860e 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
@@ -48,7 +48,7 @@
  * This interface must be implemented by entities requiring audit logging.
  * For example - OMVolumeArgs, OMBucketArgs.
  * The implementing class must override toAuditMap() to return an
- * instance of Map<Key, Value> where both Key and Value are String.
+ * instance of {@code Map<Key, Value>} where both Key and Value are String.
  *
  * Key: must contain printable US ASCII characters
  * May not contain a space, =, ], or "
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
index 471a947941..fabd281076 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
@@ -582,7 +582,7 @@ public abstract class SCMCommonPlacementPolicy implements
    * replication is computed.
    * The algorithm starts with creating a replicaIdMap which contains the
    * replicas grouped by replica Index. A placementGroup Map is created which
-   * groups replicas based on their rack & the replicas within the rack
+   * groups replicas based on their rack and the replicas within the rack
    * are further grouped based on the replica Index.
    * A placement Group Count Map is created which keeps
    * track of the count of replicas in each rack.
@@ -590,9 +590,9 @@ public abstract class SCMCommonPlacementPolicy implements
    * order based on their current replication factor in a descending factor.
    * For each replica Index the replica is removed from the rack which contains
    * the most replicas, in order to achieve this the racks are put
-   * into priority queue & are based on the number of replicas they have.
-   * The replica is removed from the rack with maximum replicas & the replica
-   * to be removed is also removed from the maps created above &
+   * into priority queue and are based on the number of replicas they have.
+   * The replica is removed from the rack with maximum replicas and the replica
+   * to be removed is also removed from the maps created above and
    * the count for rack is reduced.
    * The set of replicas computed are then returned by the function.
    * @param replicas: Set of existing replicas of the container
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
index 45d53c0ef2..5ec68c78d7 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
@@ -57,7 +57,7 @@ public interface DeletedBlockLog extends Closeable {
    * considered to be failed if it has been sent more than MAX_RETRY limit
    * and its count is reset to -1.
    *
-   * @param count Maximum num of returned transactions, if < 0. return all.
+   * @param count Maximum num of returned transactions; if &lt; 0, return all.
    * @param startTxId The least transaction id to start with.
    * @return a list of failed deleted block transactions.
    * @throws IOException
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
index 3eba240533..af900d1c14 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
@@ -164,7 +164,6 @@ public interface ContainerManager extends Closeable {
    *
    * @param containerID Container ID
    * @param replica ContainerReplica
-   * @return True of dataNode is removed successfully else false.
    */
   void removeContainerReplica(ContainerID containerID, ContainerReplica 
replica)
       throws ContainerNotFoundException, ContainerReplicaNotFoundException;
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
index 094e535dcb..3d113b3d30 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
@@ -42,8 +42,7 @@ public class SCMNodeMetric implements 
DatanodeMetric<SCMNodeStat, Long>,
    * @param capacity  in bytes
    * @param used      in bytes
    * @param remaining in bytes
-   * @param committed
-   * @paaram committed in bytes
+   * @param committed in bytes
    */
   @VisibleForTesting
   public SCMNodeMetric(long capacity, long used, long remaining,
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index 25be60945a..992dc82582 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -97,8 +97,6 @@ public interface NodeManager extends 
StorageContainerNodeProtocol,
    * @param type The type of the SCMCommand.
    * @param scmCommand A BiConsumer that takes a DatanodeDetails and a
    *                   SCMCommand object and performs the necessary actions.
-   * @return whatever the regular register command returns with default
-   * layout version passed in.
    */
   default void registerSendCommandNotify(SCMCommandProto.Type type,
       BiConsumer<DatanodeDetails, SCMCommand<?>> scmCommand) {
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java
index a66fc0d22f..1bd9677a36 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java
@@ -45,7 +45,7 @@ public interface NodeManagerMXBean {
 
   /**
    * @return Get the NodeStatus table information  like hostname,
-   * Commissioned State & Operational State column for dataNode
+   * Commissioned State and Operational State column for dataNode
    */
   Map<String, Map<String, String>> getNodeStatusInfo();
 
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
index 3c3ff8fb83..78c1801a10 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
@@ -791,7 +791,7 @@ public class NodeStateManager implements Runnable, 
Closeable {
    *
    * This method is synchronized to coordinate node state updates between
    * the upgrade finalization thread which calls this method, and the
-   * node health processing thread that calls {@link this#checkNodesHealth}.
+   * node health processing thread that calls {@link #checkNodesHealth}.
    */
   public synchronized void forceNodesToHealthyReadOnly() {
     try {
@@ -817,7 +817,7 @@ public class NodeStateManager implements Runnable, 
Closeable {
   /**
    * This method is synchronized to coordinate node state updates between
    * the upgrade finalization thread which calls
-   * {@link this#forceNodesToHealthyReadOnly}, and the node health processing
+   * {@link #forceNodesToHealthyReadOnly}, and the node health processing
    * thread that calls this method.
    */
   @VisibleForTesting
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
index 4dd0443a50..d605887712 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
@@ -46,7 +46,6 @@ public interface PipelineStateManager {
   /**
    * Removing pipeline would be replicated to Ratis.
    * @param pipelineIDProto
-   * @return Pipeline removed
    * @throws IOException
    */
   @Replicate
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java
index d38a904d09..2c979ce371 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java
@@ -118,6 +118,8 @@ public class RootCARotationManager extends StatefulService {
    *
    * @param scm the storage container manager
    *
+   * <pre>
+   * {@code
    *                         (1)   (3)(4)
    *                   --------------------------->
    *                         (2)                        scm2(Follower)
@@ -130,8 +132,8 @@ public class RootCARotationManager extends StatefulService {
    *                   --------------------------->
    *                          (2)                       scm3(Follower)
    *                   <---------------------------
-   *
-   *
+   * }
+   * </pre>
    *   (1) Rotation Prepare
    *   (2) Rotation Prepare Ack
    *   (3) Rotation Commit
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
index cca2df0037..6f5429a853 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
@@ -289,12 +289,12 @@ public final class SCMDatanodeHeartbeatDispatcher {
   public enum ContainerReportType {
     /**
      * Incremental container report type
-     * {@liks IncrementalContainerReportFromDatanode}.
+     * {@link IncrementalContainerReportFromDatanode}.
      */
     ICR,
     /**
      * Full container report type
-     * {@liks ContainerReportFromDatanode}.
+     * {@link ContainerReportFromDatanode}.
      */
     FCR
   }
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java
index 2b6fa032b5..5aaf4b7b48 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java
@@ -70,9 +70,10 @@ public class SCMHTTPServerConfig {
    * This static class is required to support other classes
    * that reference the key names and also require attributes.
    * Example: SCMSecurityProtocol where the KerberosInfo references
-   * the old configuration with the annotation shown below:-
-   * @KerberosInfo(serverPrincipal =
-   *    ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
+   * the old configuration with the annotation shown below:
+   * <br>
+   * {@code KerberosInfo(serverPrincipal =
+   *    ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)}
    */
   public static class ConfigStrings {
     public static final String HDDS_SCM_HTTP_AUTH_CONFIG_PREFIX =
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
index 65dce09cba..56ca8798f2 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
@@ -217,7 +217,7 @@ public class ObjectStore {
    * Set secretKey for accessId.
    * @param accessId
    * @param secretKey
-   * @return S3SecretValue <accessId, secretKey> pair
+   * @return {@code S3SecretValue <accessId, secretKey>} pair
    * @throws IOException
    */
   public S3SecretValue setS3Secret(String accessId, String secretKey)
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java
index 61ae0879f7..c5985f8209 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java
@@ -214,7 +214,7 @@ public class OFSPath {
   }
 
   /**
-   * Get the volume & bucket or mount name (non-key path).
+   * Get the volume and bucket or mount name (non-key path).
    * @return String of path excluding key in bucket.
    */
   // Prepend a delimiter at beginning. e.g. /vol1/buc1
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMLeaderNotReadyException.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMLeaderNotReadyException.java
index 0bfd6922fe..8c3943d0fa 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMLeaderNotReadyException.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMLeaderNotReadyException.java
@@ -24,7 +24,7 @@ import java.io.IOException;
  * Exception thrown by
  * {@link org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB} when
  * OM leader is not ready to serve requests. This error is thrown when Raft
- * Server returns {@link org.apache.ratis.protocol.LeaderNotReadyException}.
+ * Server returns {@link 
org.apache.ratis.protocol.exceptions.LeaderNotReadyException}.
  */
 public class OMLeaderNotReadyException extends IOException  {
 
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index f52a142239..a0394d6626 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -360,7 +360,6 @@ public final class OmKeyInfo extends WithParentObjectId
    * @param updateTime if true, updates modification time.
    * @param keepOldVersions if false, old blocks won't be kept
    *                        and the new block versions will always be 0
-   * @throws IOException
    */
   public synchronized long addNewVersion(
       List<OmKeyLocationInfo> newLocationList, boolean updateTime,
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
index 24c172ef8f..ccc47e7fc6 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
@@ -34,7 +34,7 @@ import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
 /**
  * Args for deleted keys. This is written to om metadata deletedTable.
  * Once a key is deleted, it is moved to om metadata deletedTable. Having a
- * {label: List<OMKeyInfo>} ensures that if users create & delete keys with
+ * label: {@code List<OMKeyInfo>} ensures that if users create and delete keys 
with
  * exact same uri multiple times, all the delete instances are bundled under
  * the same key name. This is useful as part of GDPR compliance where an
  * admin wants to confirm if a given key is deleted from deletedTable metadata.
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
index 8584796c2e..7feefdb0b2 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
@@ -52,7 +52,7 @@ import static 
org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
  * Each snapshot created has an associated SnapshotInfo entry
  * containing the snapshotId, snapshot path,
  * snapshot checkpoint directory, previous snapshotId
- * for the snapshot path & global amongst other necessary fields.
+ * for the snapshot path and global amongst other necessary fields.
  */
 public final class SnapshotInfo implements Auditable, CopyObject<SnapshotInfo> 
{
   private static final Codec<SnapshotInfo> CODEC = new DelegatedCodec<>(
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java
index ca32c96855..e1f1f3a8c1 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java
@@ -24,7 +24,7 @@ import static 
org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
 
 /**
  * Class representing an ozone object.
- * It can be a volume with non-null volumeName (bucketName=null & name=null)
+ * It can be a volume with non-null volumeName {@literal (bucketName=null & 
name=null)}
  * or a bucket with non-null volumeName and bucketName (name=null)
  * or a key with non-null volumeName, bucketName and key name
  * (via getKeyName)
diff --git 
a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
 
b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
index 67f7ce2f07..ae57c18354 100644
--- 
a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++ 
b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -647,7 +647,7 @@ public interface OMMetadataManager extends DBStoreHAManager 
{
   long getBucketId(String volume, String bucket) throws IOException;
 
   /**
-   * Returns List<{@link BlockGroup}> for a key in the deletedTable.
+   * Returns {@code List<BlockGroup>} for a key in the deletedTable.
    * @param deletedKey - key to be purged from the deletedTable
    * @return {@link BlockGroup}
    */
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
index 068ba9aa4a..a0f3053d73 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
@@ -49,7 +49,6 @@ public interface KeyManager extends OzoneManagerFS, IOzoneAcl 
{
    * Start key manager.
    *
    * @param configuration
-   * @throws IOException
    */
   void start(OzoneConfiguration configuration);
 
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java
index 86d8352697..36edda8941 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java
@@ -43,7 +43,7 @@ import static 
org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_L
 
 /**
  * Common class to do listing of resources after merging
- * rocksDB table cache & actual table.
+ * rocksDB table cache and actual table.
  */
 public class ListIterator {
 
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManager.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManager.java
index f68789b539..2c66dd5035 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManager.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManager.java
@@ -129,7 +129,7 @@ public interface OMMultiTenantManager {
       boolean delegated);
 
   /**
-   * List all the user & accessIDs of all users that belong to this Tenant.
+   * List all the user and accessIDs of all users that belong to this Tenant.
    * Note this read is unprotected. See OzoneManager#listUserInTenant
    * @param tenantID
    * @return List of users
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManagerImpl.java
index 1d25a49fc5..a5954485bb 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManagerImpl.java
@@ -246,7 +246,6 @@ public class OMMultiTenantManagerImpl implements 
OMMultiTenantManager {
      * @param tenantId tenant name
      * @param userRoleName user role name
      * @param adminRoleName admin role name
-     * @return Tenant
      * @throws IOException
      */
     @Override
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 0038bca2e3..afbe980eeb 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -791,8 +791,9 @@ public final class OzoneManager extends 
ServiceRuntimeInfoImpl
    *
    * @param conf OzoneConfiguration
    * @return OM instance
-   * @throws IOException, AuthenticationException in case OM instance
-   *                      creation fails.
+   * @throws IOException in case OM instance creation fails.
+   * @throws AuthenticationException in case authentication fails during
+   *                      OM instance creation.
    */
   public static OzoneManager createOm(OzoneConfiguration conf)
       throws IOException, AuthenticationException {
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/AuthorizerLock.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/AuthorizerLock.java
index 1dcb0f0cd6..2d59c6259a 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/AuthorizerLock.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/AuthorizerLock.java
@@ -43,7 +43,7 @@ public interface AuthorizerLock {
 
   /**
    * @return stamp that can be passed to
-   *    {@link this#validateOptimisticRead(long)} to check if a write lock was
+   *    {@link #validateOptimisticRead(long)} to check if a write lock was
    *    acquired since the stamp was issued.
    * @throws IOException If an ongoing write prevents the lock from moving to
    *    the read state for longer than the timeout.
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
index 25a204ded2..17f9663ae1 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
@@ -438,7 +438,6 @@ public abstract class OMClientRequest implements 
RequestAuditor {
    * Return String created from OMRequest userInfo. If userInfo is not
    * set, returns null.
    * @return String
-   * @throws IOException
    */
   @VisibleForTesting
   public String getHostName() {
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
index cb507cd0f3..378e0cb12c 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
@@ -458,7 +458,6 @@ public class OMKeyCommitRequest extends OMKeyRequest {
    * @param omMetrics        om metrics
    * @param exception        exception trace
    * @param omKeyInfo        omKeyInfo
-   * @param result           result
    * @param result           stores the result of the execution
    */
   @SuppressWarnings("parameternumber")
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
index 09e5d8bca0..729ec57283 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
@@ -623,7 +623,7 @@ public abstract class OMKeyRequest extends OMClientRequest {
 
   /**
    * Check bucket quota in bytes.
-   * @paran metadataManager
+   * @param metadataManager
    * @param omBucketInfo
    * @param allocateSize
    * @throws IOException
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java
index 904b082e2d..df74edfb1c 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java
@@ -129,7 +129,6 @@ public abstract class OMVolumeRequest extends 
OMClientRequest {
    * @param dbVolumeKey
    * @param dbUserKey
    * @param transactionLogIndex
-   * @throws IOException
    */
   protected static void createVolume(
       final OMMetadataManager omMetadataManager, OmVolumeArgs omVolumeArgs,
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java
index 87983b0726..432fced0bb 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java
@@ -244,7 +244,7 @@ public final class SnapshotUtils {
    * <p>
    * Note: Currently, this is only intended to be a special use case in
    * Snapshot. If this is used elsewhere, consider moving this to
-   * @link OMMetadataManager}.
+   * {@link OMMetadataManager}.
    *
    * @param volumeName volume name
    * @param bucketName bucket name
diff --git 
a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
 
b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
index 4dc70bfa56..f873b43ae9 100644
--- 
a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
+++ 
b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
@@ -193,7 +193,6 @@ public class OzoneFSInputStream extends FSInputStream
   /**
    * @param buf the ByteBuffer to receive the results of the read operation.
    * @param position offset
-   * @return void
    * @throws IOException if there is some error performing the read
    * @throws EOFException if end of file reached before reading fully
    */
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
index 5c9f6a5f4e..1a2a705fc0 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
@@ -352,7 +352,8 @@ public class ReconUtils {
    * @param url               url to call
    * @param isSpnego          is SPNEGO enabled
    * @return HttpURLConnection instance of the HTTP call.
-   * @throws IOException, AuthenticationException While reading the response.
+   * @throws IOException While reading the response.
+   * @throws AuthenticationException While authenticating the SPNEGO HTTP call.
    */
   public HttpURLConnection makeHttpCall(URLConnectionFactory connectionFactory,
                                         String url, boolean isSpnego)
@@ -569,7 +570,6 @@ public class ReconUtils {
    * @param dateFormat
    * @param timeZone
    * @return the epoch milliseconds representation of the date.
-   * @throws ParseException
    */
   public static long convertToEpochMillis(String dateString, String 
dateFormat, TimeZone timeZone) {
     String localDateFormat = dateFormat;
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
index cbdc198f8a..c4d2d35bef 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
@@ -157,7 +157,7 @@ public class ContainerEndpoint {
   }
 
   /**
-   * Return @{@link org.apache.hadoop.hdds.scm.container}
+   * Return {@linkplain org.apache.hadoop.hdds.scm.container}
    * for the containers starting from the given "prev-key" query param for the
    * given "limit". The given "prev-key" is skipped from the results returned.
    *
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
index 3f95c04fc9..4620b69fbe 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
@@ -734,7 +734,7 @@ public class OMDBInsightEndpoint {
    * /volume1/fso-bucket/dir1/dir2/dir3/file1
    * Input Request for OBS bucket:
    *
-   *    
`api/v1/keys/listKeys?startPrefix=/volume1/obs-bucket&limit=2&replicationType=RATIS`
+   *    {@literal 
`api/v1/keys/listKeys?startPrefix=/volume1/obs-bucket&limit=2&replicationType=RATIS`}
    * Output Response:
    *
    * {
@@ -832,7 +832,7 @@ public class OMDBInsightEndpoint {
    * }
    * Input Request for FSO bucket:
    *
-   *        
`api/v1/keys/listKeys?startPrefix=/volume1/fso-bucket&limit=2&replicationType=RATIS`
+   *        {@literal 
`api/v1/keys/listKeys?startPrefix=/volume1/fso-bucket&limit=2&replicationType=RATIS`}
    * Output Response:
    *
    * {
@@ -930,7 +930,6 @@ public class OMDBInsightEndpoint {
    * }
    *
    * ********************************************************
-   * @throws IOException
    */
   @GET
   @Path("/listKeys")
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java
index 59957e1162..bfb2b05aad 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java
@@ -177,7 +177,7 @@ public interface ReconContainerMetadataManager {
 
   /**
    * Get a Map of containerID, containerMetadata of Containers only for the
-   * given limit. If the limit is -1 or any integer <0, then return all
+   * given limit. If the limit is -1 or any integer &lt; 0, then return all
    * the containers without any limit.
    *
    * @param limit the no. of containers to fetch.
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java
index 46b75e45fa..88dffbacd2 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java
@@ -421,7 +421,7 @@ public class ReconContainerMetadataManagerImpl
    * only for the given limit from the given start key. The start containerID
    * is skipped from the result.
    *
-   * Return all the containers if limit < 0.
+   * Return all the containers if limit &lt; 0.
    *
    * @param limit No of containers to get.
    * @param prevContainer containerID after which the
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmUpdateEventValidator.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmUpdateEventValidator.java
index 3c7ce844e9..b5a690f5eb 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmUpdateEventValidator.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmUpdateEventValidator.java
@@ -23,7 +23,6 @@ import org.apache.hadoop.ozone.om.codec.OMDBDefinition;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
 
 /**
  * OmUpdateEventValidator is a utility class for validating OMDBUpdateEvents
@@ -48,7 +47,6 @@ public class OmUpdateEventValidator {
    * @param keyType          the key type of the event.
    * @param action           the action performed on the event.
    * @return true if the event is valid, false otherwise.
-   * @throws IOException if an I/O error occurs during the validation.
    */
   public boolean isValidEvent(String tableName,
                               Object actualValueType,
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java
index 43a1e6b713..d517154de8 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java
@@ -45,7 +45,7 @@ import org.slf4j.LoggerFactory;
 import static 
org.apache.hadoop.ozone.s3.exception.S3ErrorTable.MALFORMED_HEADER;
 
 /**
- * Parser to process AWS V2 & V4 auth request. Creates string to sign and auth
+ * Parser to process AWS V2 and V4 auth request. Creates string to sign and 
auth
  * header. For more details refer to AWS documentation https://docs.aws
  * .amazon.com/general/latest/gr/sigv4-create-canonical-request.html.
  **/
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java
index be9ecce7c0..2746de8e5c 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.ozone.s3.signature;
 
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -53,7 +52,7 @@ public class Credential {
    * Sample credential value:
    * Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request
    *
-   * @throws OS3Exception
+   * @throws MalformedResourceException
    */
   @SuppressWarnings("StringSplitter")
   public void parseCredential() throws MalformedResourceException {
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java
index 991099f270..46a311e354 100644
--- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java
@@ -108,7 +108,6 @@ public class NSSummaryAdmin extends GenericCli implements 
SubcommandWithParent {
    * Returns false if bucket is part of path but not a OBS bucket.
    * @param path
    * @return true if bucket is OBS bucket or not part of provided path.
-   * @throws IOException
    */
   public boolean isNotValidBucketOrOBSBucket(String path) {
     OFSPath ofsPath = new OFSPath(path,
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java
index 3eb879d5c0..5bc2c40931 100644
--- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java
@@ -31,7 +31,7 @@ import java.util.concurrent.Callable;
 import java.util.concurrent.atomic.AtomicLong;
 
 /**
- * Directory & File Generator tool to test OM performance.
+ * Directory and File Generator tool to test OM performance.
  */
 @Command(name = "dtsg",
     aliases = "dfs-tree-generator",
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java
index a7a74c2e37..76ac3144d4 100644
--- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java
@@ -67,7 +67,7 @@ public class ContainerMapper {
   /**
    * Generates Container Id to Blocks and BlockDetails mapping.
    * @param configuration @{@link OzoneConfiguration}
-   * @return Map<Long, List<Map<Long, @BlockDetails>>>
+   * @return {@code Map<Long, List<Map<Long, @BlockDetails>>>}
    *   Map of ContainerId -> (Block, Block info)
    * @throws IOException
    */


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to