This is an automated email from the ASF dual-hosted git repository.

stoty pushed a commit to branch 5.1
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit cf48ee5820c1add612822c27be5cc30e06933a61
Author: Istvan Toth <st...@apache.org>
AuthorDate: Thu Jan 28 17:43:07 2021 +0100

    PHOENIX-6346 Javadoc generation fails with Java 8
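
    Java 8's javadoc runs doclint by default, so bare '<' and '>' in comments are
    rejected as malformed HTML, and {@link}, @param, or @throws references that do
    not resolve fail the build. The changes below wrap such fragments in {@code ...}
    and remove or correct the references that cannot be resolved. A minimal sketch of
    the pattern (hypothetical class name; the example text is taken from the
    LengthFunction change in this patch):

        // Rejected by Java 8 doclint: the bare angle brackets read as broken HTML.
        //   /** Implementation of the LENGTH(<string>) built-in function. */
        //
        // Accepted: the fragment is escaped with {@code ...}, so javadoc leaves it alone.

        /** Implementation of the {@code LENGTH(<string>) } built-in function. */
        class LengthFunctionDocExample { } // hypothetical holder class, only so the sketch compiles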
---
 .../InterRegionServerIndexRpcControllerFactory.java  |  2 +-
 ...nterRegionServerMetadataRpcControllerFactory.java |  2 +-
 .../hbase/regionserver/IndexHalfStoreFileReader.java |  4 ++--
 .../hbase/regionserver/IndexKeyValueSkipListSet.java |  6 +++---
 .../regionserver/LocalIndexStoreFileScanner.java     |  2 +-
 .../hbase/regionserver/ScannerContextUtil.java       |  2 +-
 .../hbase/regionserver/wal/IndexedHLogReader.java    |  8 ++++----
 .../cache/aggcache/SpillableGroupByCache.java        |  8 ++++----
 .../java/org/apache/phoenix/compile/BindManager.java |  2 +-
 .../org/apache/phoenix/compile/ColumnResolver.java   |  3 +--
 .../org/apache/phoenix/compile/JoinCompiler.java     |  2 +-
 .../java/org/apache/phoenix/compile/KeyPart.java     |  2 +-
 .../org/apache/phoenix/compile/SubqueryRewriter.java |  2 ++
 .../apache/phoenix/compile/SubselectRewriter.java    |  4 +---
 .../org/apache/phoenix/compile/WhereOptimizer.java   |  2 +-
 .../coprocessor/ChildLinkMetaDataEndpoint.java       |  2 +-
 .../phoenix/coprocessor/MetaDataEndpointImpl.java    |  2 +-
 .../apache/phoenix/coprocessor/MetaDataProtocol.java |  2 +-
 .../apache/phoenix/expression/CaseExpression.java    |  6 ++----
 .../phoenix/expression/ComparisonExpression.java     |  2 +-
 .../apache/phoenix/expression/ModulusExpression.java |  4 ++--
 .../apache/phoenix/expression/OrderByExpression.java |  8 +++-----
 .../aggregator/PercentileDiscClientAggregator.java   |  2 +-
 .../expression/function/CeilDateExpression.java      |  2 +-
 .../expression/function/CountAggregateFunction.java  |  2 +-
 .../function/DistinctCountAggregateFunction.java     |  2 +-
 .../function/ExternalSqlTypeIdFunction.java          |  2 +-
 .../expression/function/FirstValueFunction.java      |  2 +-
 .../expression/function/FirstValuesFunction.java     |  2 +-
 .../phoenix/expression/function/LTrimFunction.java   |  4 ++--
 .../expression/function/LastValueFunction.java       |  2 +-
 .../expression/function/LastValuesFunction.java      |  2 +-
 .../phoenix/expression/function/LengthFunction.java  |  4 ++--
 .../expression/function/NthValueFunction.java        |  2 +-
 .../function/PercentRankAggregateFunction.java       |  2 +-
 .../function/PercentileContAggregateFunction.java    |  2 +-
 .../function/PercentileDiscAggregateFunction.java    |  2 +-
 .../phoenix/expression/function/RTrimFunction.java   |  4 ++--
 .../phoenix/expression/function/RandomFunction.java  |  2 ++
 .../expression/function/RegexpReplaceFunction.java   |  2 +-
 .../expression/function/RegexpSubstrFunction.java    | 10 +++++-----
 .../expression/function/RoundDateExpression.java     |  2 +-
 .../expression/function/StddevPopFunction.java       |  2 +-
 .../expression/function/StddevSampFunction.java      |  2 +-
 .../phoenix/expression/function/SubstrFunction.java  | 10 +++++-----
 .../phoenix/expression/function/TrimFunction.java    |  2 +-
 .../phoenix/expression/function/TruncFunction.java   |  2 +-
 .../java/org/apache/phoenix/hbase/index/Indexer.java | 14 +++++++-------
 .../hbase/index/builder/BaseIndexBuilder.java        |  2 --
 .../phoenix/hbase/index/builder/IndexBuilder.java    | 10 ++++------
 .../phoenix/hbase/index/covered/IndexCodec.java      |  1 -
 .../phoenix/hbase/index/covered/LocalTableState.java |  2 +-
 .../hbase/index/covered/data/IndexMemStore.java      |  4 ++--
 .../index/covered/filter/NewerTimestampFilter.java   |  2 +-
 .../exception/SingleIndexWriteFailureException.java  |  2 +-
 .../hbase/index/scanner/FilteredKeyValueScanner.java |  2 +-
 .../hbase/index/util/IndexManagementUtil.java        |  2 +-
 .../phoenix/hbase/index/wal/KeyValueCodec.java       |  2 +-
 .../hbase/index/write/IndexFailurePolicy.java        |  4 ++--
 .../phoenix/hbase/index/write/IndexWriter.java       |  4 ++--
 .../phoenix/hbase/index/write/IndexWriterUtils.java  |  2 +-
 .../write/TrackingParallelWriterIndexCommitter.java  |  2 +-
 .../write/recovery/PerRegionIndexWriteCache.java     |  2 +-
 .../org/apache/phoenix/index/IndexMaintainer.java    |  2 +-
 .../phoenix/index/IndexMetaDataCacheClient.java      |  2 +-
 .../apache/phoenix/index/PhoenixIndexBuilder.java    |  2 +-
 .../phoenix/index/PhoenixIndexFailurePolicy.java     |  9 +++------
 .../java/org/apache/phoenix/jdbc/PhoenixDriver.java  |  2 +-
 .../org/apache/phoenix/jdbc/PhoenixStatement.java    |  2 +-
 .../org/apache/phoenix/join/HashCacheClient.java     | 11 ++++++++++-
 .../phoenix/mapreduce/CsvToKeyValueMapper.java       |  2 +-
 .../mapreduce/ImportPreUpsertKeyValueProcessor.java  |  4 ++--
 .../phoenix/mapreduce/JsonToKeyValueMapper.java      |  2 +-
 .../apache/phoenix/mapreduce/PhoenixInputFormat.java |  1 -
 .../apache/phoenix/mapreduce/PhoenixInputSplit.java  |  2 +-
 .../phoenix/mapreduce/RegexToKeyValueMapper.java     |  2 +-
 .../phoenix/monitoring/MutationMetricQueue.java      |  2 +-
 .../apache/phoenix/monitoring/ReadMetricQueue.java   |  2 +-
 .../parse/AndRewriterBooleanParseNodeVisitor.java    |  2 +-
 .../apache/phoenix/parse/ComparisonParseNode.java    |  2 +-
 .../phoenix/parse/GreaterThanOrEqualParseNode.java   |  2 +-
 .../apache/phoenix/parse/GreaterThanParseNode.java   |  2 +-
 .../main/java/org/apache/phoenix/parse/HintNode.java |  2 +-
 .../phoenix/parse/LessThanOrEqualParseNode.java      |  2 +-
 .../org/apache/phoenix/parse/LessThanParseNode.java  |  2 +-
 .../org/apache/phoenix/parse/NotEqualParseNode.java  |  2 +-
 .../org/apache/phoenix/parse/ParseNodeFactory.java   |  2 +-
 .../org/apache/phoenix/parse/ParseNodeRewriter.java  |  4 ++--
 .../apache/phoenix/parse/ShowTablesStatement.java    |  2 +-
 .../phoenix/query/ConnectionQueryServices.java       |  2 +-
 .../phoenix/query/ConnectionQueryServicesImpl.java   |  1 -
 .../query/ConnectionlessQueryServicesImpl.java       |  4 ++--
 .../apache/phoenix/query/GuidePostsCacheImpl.java    |  2 +-
 .../apache/phoenix/query/HBaseFactoryProvider.java   |  2 +-
 .../org/apache/phoenix/query/HConnectionFactory.java |  4 ++--
 .../org/apache/phoenix/query/PropertyPolicy.java     |  8 +++++---
 .../apache/phoenix/query/PropertyPolicyProvider.java |  2 +-
 .../java/org/apache/phoenix/query/QueryServices.java |  2 +-
 .../org/apache/phoenix/schema/ColumnModifier.java    |  2 +-
 .../java/org/apache/phoenix/schema/SortOrder.java    | 12 ++++++------
 .../schema/stats/DefaultStatisticsCollector.java     |  1 -
 .../apache/phoenix/schema/stats/GuidePostsInfo.java  |  2 +-
 .../phoenix/schema/stats/GuidePostsInfoBuilder.java  |  1 -
 .../phoenix/schema/stats/StatisticsCollector.java    |  1 -
 .../phoenix/schema/stats/StatisticsWriter.java       |  2 +-
 .../apache/phoenix/schema/types/PArrayDataType.java  |  4 ++--
 .../org/apache/phoenix/schema/types/PDataType.java   |  2 +-
 .../transaction/PhoenixTransactionContext.java       |  2 --
 .../transaction/PhoenixTransactionProvider.java      |  1 -
 .../apache/phoenix/util/DefaultEnvironmentEdge.java  |  1 -
 .../java/org/apache/phoenix/util/ExpressionUtil.java |  2 +-
 .../java/org/apache/phoenix/util/MetaDataUtil.java   |  7 +++----
 .../apache/phoenix/util/PhoenixContextExecutor.java  |  5 ++---
 .../org/apache/phoenix/util/PhoenixKeyValueUtil.java |  2 +-
 .../java/org/apache/phoenix/util/PhoenixRuntime.java | 20 ++++++++++++--------
 .../java/org/apache/phoenix/util/ReadOnlyProps.java  |  4 ++--
 .../java/org/apache/phoenix/util/StringUtil.java     |  2 +-
 .../main/java/org/apache/phoenix/util/ViewUtil.java  | 13 +++++++------
 .../org/apache/phoenix/pherf/jmx/MonitorManager.java |  4 ++--
 .../java/org/apache/phoenix/pherf/result/Result.java |  2 +-
 .../apache/phoenix/pherf/result/ResultManager.java   |  3 +--
 .../phoenix/pherf/rules/RuleBasedDataGenerator.java  |  2 +-
 .../org/apache/phoenix/pherf/rules/RulesApplier.java |  2 +-
 123 files changed, 198 insertions(+), 205 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java
 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java
index 47b6c40..c761f9c 100644
--- 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java
+++ 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 
 /**
- * {@link RpcControllerFactory} that should only be used when creating {@link 
Table} for
+ * RpcControllerFactory that should only be used when creating Table for
  * making remote RPCs to the region servers hosting global mutable index table 
regions.
  * This controller factory shouldn't be globally configured anywhere and is 
meant to be used
  * only internally by Phoenix indexing code.
diff --git 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerMetadataRpcControllerFactory.java
 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerMetadataRpcControllerFactory.java
index 3f63ac3..0a5e8ed 100644
--- 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerMetadataRpcControllerFactory.java
+++ 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerMetadataRpcControllerFactory.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 
 /**
- * {@link RpcControllerFactory} that should only be used when creating {@link 
Table} for
+ * {@link RpcControllerFactory} that should only be used when creating Table 
for
  * making remote RPCs to the region servers hosting Phoenix SYSTEM tables.
  */
 public class InterRegionServerMetadataRpcControllerFactory extends 
RpcControllerFactory {
diff --git 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
index 99ba0ff..80ea6be 100644
--- 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
+++ 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
@@ -49,8 +49,8 @@ import org.apache.phoenix.index.IndexMaintainer;
  * This type works in tandem with the {@link Reference} type. This class is 
used reading while
  * Reference is used writing.
  *
- * <p>
- * This file is not splitable. Calls to {@link #midkey()} return null.
+ * 
+ * This file is not splitable. Calls to #midkey() return null.
  */
 
 public class IndexHalfStoreFileReader extends CompatStoreFileReader {
diff --git 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexKeyValueSkipListSet.java
 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexKeyValueSkipListSet.java
index 0d2de89..4659484 100644
--- 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexKeyValueSkipListSet.java
+++ 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexKeyValueSkipListSet.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.CellComparator;
 
 /**
  * Like a {@link KeyValueSkipListSet}, but also exposes useful, atomic methods 
(e.g.
- * {@link #putIfAbsent(KeyValue)}).
+ * #putIfAbsent(KeyValue)).
  */
 public class IndexKeyValueSkipListSet extends KeyValueSkipListSet {
 
@@ -54,7 +54,7 @@ public class IndexKeyValueSkipListSet extends 
KeyValueSkipListSet {
   }
 
   /**
-   * Add the passed {@link KeyValue} to the set, only if one is not already 
set. This is equivalent
+   * Add the passed KeyValue to the set, only if one is not already set. This 
is equivalent
    * to
    * <pre>
    * if (!set.containsKey(key))
@@ -63,7 +63,7 @@ public class IndexKeyValueSkipListSet extends 
KeyValueSkipListSet {
    *  return map.set(key);
    * </pre>
    * except that the action is performed atomically.
-   * @param kv {@link KeyValue} to add
+   * @param kv KeyValue to add
    * @return the previous value associated with the specified key, or 
<tt>null</tt> if there was no
    *         previously stored key
    * @throws ClassCastException if the specified key cannot be compared with 
the keys currently in
diff --git 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java
 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java
index 000f9c7..4952e70 100644
--- 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java
+++ 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java
@@ -222,7 +222,7 @@ public class LocalIndexStoreFileScanner extends 
StoreFileScanner{
     
     /**
      * 
-     * @param kv
+     * @param cell
      * @param isSeek pass true for seek, false for reseek.
      * @return 
      * @throws IOException
diff --git 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java
 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java
index 040b98b..b17d541 100644
--- 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java
+++ 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 
 /**
- * @ScannerContext has all methods package visible. To properly update the 
context progress for our scanners we
+ * ScannerContext has all methods package visible. To properly update the 
context progress for our scanners we
  * need this helper
  */
 public class ScannerContextUtil {
diff --git 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedHLogReader.java
 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedHLogReader.java
index cc2fec6..f6abb1e 100644
--- 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedHLogReader.java
+++ 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedHLogReader.java
@@ -23,10 +23,10 @@ import java.io.IOException;
 
 
 /**
- * A WALReader that can also deserialize custom {@link WALEdit}s that contain 
index information.
- * <p>
- * This is basically a wrapper around a {@link SequenceFileLogReader} that has 
a custom
- * {@link SequenceFileLogReader.WALReader#next(Object)} method that only 
replaces the creation of the WALEdit with our own custom
+ * A WALReader that can also deserialize custom WALEdit s that contain index 
information.
+ * 
+ * This is basically a wrapper around a SequenceFileLogReader that has a custom
+ * SequenceFileLogReader.WALReader#next(Object) method that only replaces the 
creation of the WALEdit with our own custom
  * type
  * <p>
  * This is a little bit of a painful way of going about this, but saves the 
effort of hacking the
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
index 9f75d31..17c5bab 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
@@ -58,7 +58,7 @@ import org.slf4j.LoggerFactory;
 
 /**
  * The main entry point is in GroupedAggregateRegionObserver. It instantiates 
a SpillableGroupByCache and invokes a
- * get() method on it. There is no: "if key not exists -> put into map" case, 
since the cache is a Loading cache and
+ * get() method on it. There is no: {@code "if key not exists -> put into map" 
} case, since the cache is a Loading cache and
  * therefore handles the put under the covers. I tried to implement the final 
cache element accesses (RegionScanner
  * below) streaming, i.e. there is just an iterator on it and removed the 
existing result materialization.
  * SpillableGroupByCache implements a LRU cache using a LinkedHashMap with 
access order. There is a configurable an
@@ -120,10 +120,10 @@ public class SpillableGroupByCache implements 
GroupByCache {
     /**
      * Instantiates a Loading LRU Cache that stores key / aggregator[] tuples 
used for group by queries
      *
-     * @param estSize
-     * @param estValueSize
+     * @param env
+     * @param tenantId
      * @param aggs
-     * @param ctxt
+     * @param estSizeNum
      */
     public SpillableGroupByCache(final RegionCoprocessorEnvironment env, 
ImmutableBytesPtr tenantId,
             ServerAggregators aggs, final int estSizeNum) {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/BindManager.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/BindManager.java
index fc5fbaf..f7a2a6d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/BindManager.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/BindManager.java
@@ -37,7 +37,7 @@ import org.apache.phoenix.schema.PDatum;
  * 2) the retrieve param metadata case where we don't have the bind values.
  * 
  * In both cases, during query compilation we figure out what type the bind 
variable
- * "should" be, based on how it's used in the query. For example foo < ? would 
expect
+ * "should" be, based on how it's used in the query. For example {@code foo < 
? } would expect
  * that the bind variable type matches or can be coerced to the type of foo. 
For (1),
  * we check that the bind value has the correct type and for (2) we set the 
param
  * metadata type.
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ColumnResolver.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ColumnResolver.java
index 6c91123..a3d8567 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ColumnResolver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ColumnResolver.java
@@ -56,8 +56,7 @@ public interface ColumnResolver {
      * @param schemaName the schema name
      * @param tableName the table name or table alias
      * @return the resolved TableRef
-     * @throws TableNotFoundException if the table could not be resolved
-     * @throws AmbiguousTableException if the table name is ambiguous
+     * @throws SQLException
      */
     public TableRef resolveTable(String schemaName, String tableName) throws 
SQLException;
     
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
index 675ca83..d2a3175 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
@@ -397,7 +397,7 @@ public class JoinCompiler {
 
         /**
          * Pruning columns for each {@link JoinCompiler.Table} if
-         * {@link @link JoinCompiler.Table#isSubselect()}.
+         * {@link JoinCompiler.Table#isSubselect()}.
          * @throws SQLException
          */
         public void pruneSubselectAliasedNodes() throws SQLException {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/KeyPart.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/KeyPart.java
index 6cf9938..c2d8e7e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/KeyPart.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/KeyPart.java
@@ -41,7 +41,7 @@ public interface KeyPart {
      * SUBSTR(foo,1,3) = 'bar', the key range would be ['bar','bas'),
      * and if foo was fixed length, the upper and lower key range
      * bytes would be filled out to the fixed length.
-     * @param op comparison operator (=, <=, <, >=, >, !=)
+     * @param op comparison operator {@code (=, <=, <, >=, >, !=) }
      * @param rhs the constant on the RHS of an expression.
      * @return the key range that encompasses the range for the
      *  expression for which this keyPart is associated
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/SubqueryRewriter.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/SubqueryRewriter.java
index 7f67e5d..ab9649e 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/SubqueryRewriter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/SubqueryRewriter.java
@@ -135,6 +135,7 @@ public class SubqueryRewriter extends ParseNodeRewriter {
 
     /**
      * <pre>
+     * {@code
      * Rewrite the In Subquery to semi/anti/left join for both NonCorrelated 
and Correlated subquery.
      *
      * 1.If the {@link InParseNode} is the only node in where clause or is the 
ANDed part of the where clause,
@@ -177,6 +178,7 @@ public class SubqueryRewriter extends ParseNodeRewriter {
      *     SELECT ITEM_ID,NAME FROM item I  Left JOIN
      *     (SELECT DISTINCT 1 $28, MAX(ITEM_ID) $29,O.PRICE $27 FROM order O  
GROUP BY O.PRICE,O.CUSTOMER_ID) $26
      *     ON ((I.ITEM_ID = $26.$29 AND $26.$27 = I.PRICE)) WHERE ($26.$28 IS 
NOT NULL  OR I.DISCOUNT1 > 10)
+     * }
      * </pre>
      */
     @Override
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/SubselectRewriter.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/SubselectRewriter.java
index c3da4fb..a7d6dab 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/SubselectRewriter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/SubselectRewriter.java
@@ -68,7 +68,6 @@ public class SubselectRewriter extends ParseNodeRewriter {
      * @param preFilterParseNodes
      * @param subselectAlias
      * @return
-     * @throws SQLException
      */
     public static SelectStatement applyPreFiltersForSubselect(
             SelectStatement subselectStatement,
@@ -111,7 +110,6 @@ public class SubselectRewriter extends ParseNodeRewriter {
      * added to the statement by {@link #applyPreFiltersForSubselect}.
      * @param statement
      * @return
-     * @throws SQLException
      */
     public static boolean isFilterCanPushDownToSelect(SelectStatement 
statement) {
         return statement.getLimit() == null &&
@@ -282,7 +280,7 @@ public class SubselectRewriter extends ParseNodeRewriter {
     /**
      * Pruning selectAliasedNodes according to referencedColumnNames,
      * Note: the selectStatement is supposed to be a {@link DerivedTableNode} 
of an Outer SelectStatement,
-     * so according to {@link 
FromCompiler.MultiTableColumnResolver#visit(DerivedTableNode)},
+     * so according to 
FromCompiler.MultiTableColumnResolver#visit(DerivedTableNode) ,
      * wildcard in selectAliasedNode is not supported.
      * @param selectStatement
      * @param referencedColumnNames
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
index 5daa582..65bbbfd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
@@ -1809,7 +1809,7 @@ public class WhereOptimizer {
         /**
          * 
          * Implementation of KeySlots for AND and OR expressions. The
-         * List<KeySlot> will be in PK order.
+         * {@code List<KeySlot> } will be in PK order.
          *
          */
         public static class MultiKeySlot implements KeySlots {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ChildLinkMetaDataEndpoint.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ChildLinkMetaDataEndpoint.java
index 99159b5..90f516c 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ChildLinkMetaDataEndpoint.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ChildLinkMetaDataEndpoint.java
@@ -48,7 +48,7 @@ import static 
org.apache.phoenix.coprocessor.MetaDataEndpointImpl.mutateRowsWith
 
 /**
  * Endpoint co-processor through which Phoenix metadata mutations for 
SYSTEM.CHILD_LINK flow.
- * The parent->child links ({@link 
org.apache.phoenix.schema.PTable.LinkType#CHILD_TABLE})
+ * The {@code parent->child } links ({@link 
org.apache.phoenix.schema.PTable.LinkType#CHILD_TABLE})
  * are stored in the SYSTEM.CHILD_LINK table.
  */
 public class ChildLinkMetaDataEndpoint extends ChildLinkMetaDataService 
implements RegionCoprocessor {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index b41c6ab..8204bf9 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -260,7 +260,7 @@ import com.google.protobuf.Service;
  * is stored in a single header row. Column information is stored in a separate
  * row per column. Linking information (indexes, views etc) are stored using a
  * separate row for each link that uses the {@link LinkType} column value. The
- * parent->child links are stored in a separate SYSTEM.CHILD_LINK table.
+ * {@code parent->child } links are stored in a separate SYSTEM.CHILD_LINK 
table.
  * Metadata for all tables/views/indexes in the same schema are stored in a
  * single region which is enforced using the {@link MetaDataSplitPolicy}.
  * <p>
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index 37aaf8f..e4fae7e 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -52,7 +52,7 @@ import com.google.protobuf.ByteString;
  * an HBase table named SYSTEM.TABLE. Each table is represented by:
  * - one row for the table
  * - one row per column in the tabe
- * Upto {@link #DEFAULT_MAX_META_DATA_VERSIONS} versions are kept. The time
+ * Upto #DEFAULT_MAX_META_DATA_VERSIONS versions are kept. The time
  * stamp of the metadata must always be increasing. The timestamp of the key
  * values in the data row corresponds to the schema that it's using.
  *
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/CaseExpression.java 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/CaseExpression.java
index c426d83..966dc63 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/CaseExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/CaseExpression.java
@@ -107,10 +107,8 @@ public class CaseExpression extends BaseCompoundExpression 
{
     }
     /**
      * Construct CASE/WHEN expression
-     * @param expressions list of expressions in the form of:
-     *  ((<result expression>, <boolean expression>)+, [<optional else result 
expression>])
-     * @throws SQLException if return type of case expressions do not match 
and cannot
-     *  be coerced to a common type
+     * @param children list of expressions in the form of:
+     * {@code ((<result expression>, <boolean expression>)+, [<optional else 
result expression>]) }
      */
     public CaseExpression(List<Expression> children) {
         super(children);
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/ComparisonExpression.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/ComparisonExpression.java
index e387107..bbbd2d0 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/ComparisonExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/ComparisonExpression.java
@@ -56,7 +56,7 @@ import 
org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
 
 /**
  * 
- * Implementation for <,<=,>,>=,=,!= comparison expressions
+ * Implementation for {@code <,<=,>,>=,=,!= } comparison expressions
  * 
  * @since 0.1
  */
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/ModulusExpression.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/ModulusExpression.java
index b6b669f..99006c0 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/ModulusExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/ModulusExpression.java
@@ -27,11 +27,11 @@ import org.apache.phoenix.schema.types.PLong;
 
 
 /**
- * 
+ * {@code
  * Implementation of the LENGTH(<string>) build-in function. <string> is the 
string
  * of characters we want to find the length of. If <string> is NULL or empty, 
null
  * is returned.
- * 
+ * }
  * 
  * @since 0.1
  */
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/OrderByExpression.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/OrderByExpression.java
index 0eac81c..8f71de9 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/OrderByExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/OrderByExpression.java
@@ -51,9 +51,7 @@ public class OrderByExpression implements Writable {
 
     /**
      * If {@link Expression#getSortOrder()} is {@link SortOrder#DESC},the 
isAscending of returned new is reversed,but isNullsLast is untouched.
-     * @param expression
-     * @param isNullsLast
-     * @param isAscending
+     * @param orderByExpression
      * @return
      */
     public static OrderByExpression 
convertIfExpressionSortOrderDesc(OrderByExpression orderByExpression) {
@@ -65,7 +63,7 @@ public class OrderByExpression implements Writable {
 
     /**
      * If {@link Expression#getSortOrder()} is {@link SortOrder#DESC},reverse 
the isAscending,but isNullsLast is untouched.
-     * A typical case is in {@link OrderByCompiler#compile} to get the 
compiled {@link OrderByExpression} to used for {@link OrderedResultIterator}.
+     * A typical case is in OrderByCompiler#compile to get the compiled 
OrderByExpression to used for OrderedResultIterator.
      * @param expression
      * @param isNullsLast
      * @param isAscending
@@ -80,7 +78,7 @@ public class OrderByExpression implements Writable {
 
     /**
      * If orderByReverse is true, reverse the isNullsLast and isAscending.
-     * A typical case is in {@link 
AggregatePlan.OrderingResultIteratorFactory#newIterator}
+     * A typical case is in 
AggregatePlan.OrderingResultIteratorFactory#newIterator
      * @param expression
      * @param isNullsLast
      * @param isAscending
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/PercentileDiscClientAggregator.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/PercentileDiscClientAggregator.java
index 54541e7..2589795 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/PercentileDiscClientAggregator.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/PercentileDiscClientAggregator.java
@@ -29,7 +29,7 @@ import org.apache.phoenix.schema.tuple.Tuple;
 
 /**
  * 
- * Built-in function for PERCENTILE_DISC(<expression>) WITHIN GROUP (ORDER BY 
<expression> ASC/DESC) aggregate function
+ * Built-in function for {@code PERCENTILE_DISC(<expression>) WITHIN GROUP 
(ORDER BY <expression> ASC/DESC) } aggregate function
  *
  * 
  * @since 1.2.1
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CeilDateExpression.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CeilDateExpression.java
index 3c01260..4730d00 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CeilDateExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CeilDateExpression.java
@@ -37,7 +37,7 @@ import 
org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
 
 /**
  * 
- * Class encapsulating ceil operation on {@link 
org.apache.phoenix.schema.types.PDataType#DATE}.
+ * Class encapsulating ceil operation on 
org.apache.phoenix.schema.types.PDataType#DATE.
  *
  * 
  * @since 3.0.0
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CountAggregateFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CountAggregateFunction.java
index 6eef42d..9f6fe27 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CountAggregateFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CountAggregateFunction.java
@@ -37,7 +37,7 @@ import org.apache.phoenix.util.SchemaUtil;
 
 /**
  * 
- * Built-in function for COUNT(<expression>) aggregate function,
+ * Built-in function for {@code COUNT(<expression>) } aggregate function,
  * for example COUNT(foo), COUNT(1), COUNT(*)
  *
  * 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/DistinctCountAggregateFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/DistinctCountAggregateFunction.java
index 958f8fd..e27973a 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/DistinctCountAggregateFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/DistinctCountAggregateFunction.java
@@ -36,7 +36,7 @@ import org.apache.phoenix.util.SchemaUtil;
 
 /**
  * 
- * Built-in function for COUNT(distinct <expression>) aggregate function,
+ * Built-in function for {@code COUNT(distinct <expression>) } aggregate 
function,
  *
  * 
  * @since 1.2.1
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ExternalSqlTypeIdFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ExternalSqlTypeIdFunction.java
index f510181..d8b300b 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ExternalSqlTypeIdFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ExternalSqlTypeIdFunction.java
@@ -36,7 +36,7 @@ import java.util.List;
  * Function used to get the external SQL type id from the internal SQL type 
integer.
  * Typically the external and internal ids are the same, but for some types 
(e.g. arrays)
  * there is are multiple specific internal types to represent multiple 
external types.
- * <p/>
+ * 
  * Usage:
  * ExternalSqlTypeId(12)
  * will return 12 based on {@link java.sql.Types#VARCHAR} being 12
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/FirstValueFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/FirstValueFunction.java
index 420deba..09871d9 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/FirstValueFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/FirstValueFunction.java
@@ -30,7 +30,7 @@ import org.apache.phoenix.parse.FunctionParseNode;
 import org.apache.phoenix.schema.types.PBoolean;
 
 /**
- * Built-in function for FIRST_VALUE(<expression>) WITHIN GROUP (ORDER BY 
<expression> ASC/DESC) aggregate
+ * Built-in function for {@code FIRST_VALUE(<expression>) WITHIN GROUP (ORDER 
BY <expression> ASC/DESC) aggregate }
  * function
  *
  */
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/FirstValuesFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/FirstValuesFunction.java
index fec2eaf..b9114a2 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/FirstValuesFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/FirstValuesFunction.java
@@ -33,7 +33,7 @@ import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PInteger;
 
 /**
- * Built-in function for FIRST_VALUES(<expression>, <expression>) WITHIN GROUP 
(ORDER BY <expression> ASC/DESC) aggregate
+ * Built-in function for {@code FIRST_VALUES(<expression>, <expression>) 
WITHIN GROUP (ORDER BY <expression> ASC/DESC) aggregate }
  * function
  *
  */
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LTrimFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LTrimFunction.java
index bfbe2f1..fb2706c 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LTrimFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LTrimFunction.java
@@ -34,8 +34,8 @@ import org.apache.phoenix.util.StringUtil;
 
 /**
  * 
- * Implementation of the LTrim(<string>) build-in function. It removes from 
the left end of
- * <string> space character and other function bytes in single byte utf8 
characters 
+ * Implementation of the {@code LTrim(<string>) } build-in function. It 
removes from the left end of
+ * {@code <string> } space character and other function bytes in single byte 
utf8 characters 
  * set.
  * 
  * 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LastValueFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LastValueFunction.java
index f5b3193..dd2a4b6 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LastValueFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LastValueFunction.java
@@ -30,7 +30,7 @@ import org.apache.phoenix.parse.LastValueAggregateParseNode;
 import org.apache.phoenix.schema.types.PBoolean;
 
 /**
- * Built-in function for LAST_VALUE(<expression>) WITHIN GROUP (ORDER BY 
<expression> ASC/DESC) aggregate
+ * Built-in function for {@code LAST_VALUE(<expression>) WITHIN GROUP (ORDER 
BY <expression> ASC/DESC) aggregate }
  * function
  *
  */
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LastValuesFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LastValuesFunction.java
index d4a8061..21cb082 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LastValuesFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LastValuesFunction.java
@@ -33,7 +33,7 @@ import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PInteger;
 
 /**
- * Built-in function for FIRST_VALUES(<expression>, <expression>) WITHIN GROUP 
(ORDER BY <expression> ASC/DESC) aggregate
+ * Built-in function for {@code FIRST_VALUES(<expression>, <expression>) 
WITHIN GROUP (ORDER BY <expression> ASC/DESC) aggregate }
  * function
  *
  */
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LengthFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LengthFunction.java
index 76d9e59..e69b45a 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LengthFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LengthFunction.java
@@ -36,8 +36,8 @@ import org.apache.phoenix.util.StringUtil;
 
 /**
  * 
- * Implementation of the LENGTH(<string>) build-in function. <string> is the 
string
- * of characters we want to find the length of. If <string> is NULL or empty, 
null
+ * Implementation of the {@code LENGTH(<string>) } build-in function. {@code 
<string> } is the string
+ * of characters we want to find the length of. If {@code <string> } is NULL 
or empty, null
  * is returned.
  * 
  * 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/NthValueFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/NthValueFunction.java
index 40502e5..08f6b6e 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/NthValueFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/NthValueFunction.java
@@ -31,7 +31,7 @@ import org.apache.phoenix.schema.types.PBoolean;
 import org.apache.phoenix.schema.types.PInteger;
 
 /**
- * Built-in function for NTH_VALUE(<expression>, <expression>) WITHIN GROUP 
(ORDER BY <expression> ASC/DESC)
+ * Built-in function for {@code NTH_VALUE(<expression>, <expression>) WITHIN 
GROUP (ORDER BY <expression> ASC/DESC) }
  * aggregate function
  *
  */
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/PercentRankAggregateFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/PercentRankAggregateFunction.java
index 51dc816..d56129f 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/PercentRankAggregateFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/PercentRankAggregateFunction.java
@@ -34,7 +34,7 @@ import org.apache.phoenix.schema.types.PDataType;
 
 /**
  * 
- * PERCENT_RANK(<expression>[,<expression>]) WITHIN GROUP (ORDER BY 
<expression>[,<expression>] ASC/DESC) aggregate function
+ * {@code PERCENT_RANK(<expression>[,<expression>]) WITHIN GROUP (ORDER BY 
<expression>[,<expression>] ASC/DESC) } aggregate function
  *
  * 
  * @since 1.2.1
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/PercentileContAggregateFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/PercentileContAggregateFunction.java
index 17bb33c..5a466e1 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/PercentileContAggregateFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/PercentileContAggregateFunction.java
@@ -34,7 +34,7 @@ import org.apache.phoenix.schema.types.PDataType;
 
 /**
  * 
- * Built-in function for PERCENTILE_CONT(<expression>) WITHIN GROUP (ORDER BY 
<expression> ASC/DESC) aggregate function
+ * Built-in function for {@code PERCENTILE_CONT(<expression>) WITHIN GROUP 
(ORDER BY <expression> ASC/DESC) aggregate function }
  *
  * 
  * @since 1.2.1
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/PercentileDiscAggregateFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/PercentileDiscAggregateFunction.java
index 13482f0..b79cf8b 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/PercentileDiscAggregateFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/PercentileDiscAggregateFunction.java
@@ -33,7 +33,7 @@ import org.apache.phoenix.schema.types.PBoolean;
 
 /**
  * 
- * Built-in function for PERCENTILE_DISC(<expression>) WITHIN GROUP (ORDER BY 
<expression> ASC/DESC) aggregate function
+ * Built-in function for {@code PERCENTILE_DISC(<expression>) WITHIN GROUP 
(ORDER BY <expression> ASC/DESC) aggregate function }
  *
  * 
  * @since 1.2.1
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RTrimFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RTrimFunction.java
index 81e4f9e..da09eeb 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RTrimFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RTrimFunction.java
@@ -42,8 +42,8 @@ import org.apache.phoenix.util.StringUtil;
 
 /**
  * 
- * Implementation of the RTrim(<string>) build-in function. It removes from 
the right end of
- * <string> space character and other function bytes in single byte utf8 
characters set 
+ * Implementation of the {@code RTrim(<string>) } build-in function. It 
removes from the right end of
+ * {@code <string> } space character and other function bytes in single byte 
utf8 characters set 
  * 
  * 
  * @since 0.1
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RandomFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RandomFunction.java
index d9048f4..56e0642 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RandomFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RandomFunction.java
@@ -40,6 +40,7 @@ import org.apache.phoenix.schema.types.PLong;
  * <p>
  * Example:
  * <pre>
+ * {@code
  * 0: jdbc:phoenix:localhost> select rand(), rand(), rand(1), rand(2), rand(1) 
from t;
  * 
+----------------------------+----------------------------+----------------------------+----------------------------+-----------------------+
  * |           RAND()           |           RAND()           |          
RAND(1)           |          RAND(2)           |          RAND(1)      |
@@ -56,6 +57,7 @@ import org.apache.phoenix.schema.types.PLong;
  * | 0.8084646053276106         | 0.6969504742211767         | 
0.41008081149220166        | 0.9014476240300544         | 0.41008081149220166   
|
  * 
+----------------------------+----------------------------+----------------------------+----------------------------+-----------------------+
  * 2 rows selected (0.098 seconds)
+ * }
  * </pre>
  */
 @BuiltInFunction(name = RandomFunction.NAME, args = 
{@Argument(allowedTypes={PLong.class},defaultValue="null",isConstant=true)})
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RegexpReplaceFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RegexpReplaceFunction.java
index 265b860..1a29bca 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RegexpReplaceFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RegexpReplaceFunction.java
@@ -39,7 +39,7 @@ import org.apache.phoenix.schema.types.PVarchar;
  * 
  * Function similar to the regexp_replace function in Postgres, which is used 
to pattern
  * match a segment of the string. Usage:
- * REGEXP_REPLACE(<source_char>,<pattern>,<replace_string>)
+ * {@code REGEXP_REPLACE(<source_char>,<pattern>,<replace_string>) }
  * source_char is the string in which we want to perform string replacement. 
pattern is a
  * Java compatible regular expression string, and we replace all the matching 
part with 
  * replace_string. The first 2 arguments are required and are {@link 
org.apache.phoenix.schema.types.PVarchar},
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RegexpSubstrFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RegexpSubstrFunction.java
index 466fd1a..a699a6b 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RegexpSubstrFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RegexpSubstrFunction.java
@@ -39,12 +39,12 @@ import org.apache.phoenix.schema.types.PVarchar;
 
 /**
  * 
- * Implementation of REGEXP_SUBSTR(<source>, <pattern>, <offset>) built-in 
function,
- * where <offset> is the offset from the start of <string>. Positive offset is 
treated as 1-based,
+ * Implementation of {@code REGEXP_SUBSTR(<source>, <pattern>, <offset>) } 
built-in function,
+ * where {@code <offset> } is the offset from the start of {@code <string> }. 
Positive offset is treated as 1-based,
  * a zero offset is treated as 0-based, and a negative offset starts from the 
end of the string 
- * working backwards. The <pattern> is the pattern we would like to search for 
in the <source> string.
- * The function returns the first occurrence of any substring in the <source> 
string that matches
- * the <pattern> input as a VARCHAR. 
+ * working backwards. The {@code <pattern> } is the pattern we would like to 
search for in the {@code <source> } string.
+ * The function returns the first occurrence of any substring in the {@code 
<source> } string that matches
+ * the {@code <pattern> } input as a VARCHAR. 
  * 
  * 
  * @since 0.1
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RoundDateExpression.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RoundDateExpression.java
index 10a83c1..bc0fbf0 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RoundDateExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RoundDateExpression.java
@@ -52,7 +52,7 @@ import 
org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
 /**
  * Function used to bucketize date/time values by rounding them to
  * an even increment.  Usage:
- * ROUND(<date/time col 
ref>,<'day'|'hour'|'minute'|'second'|'millisecond'|'week'|'month'|'year'>,<optional
 integer multiplier>)
+ * {@code ROUND(<date/time col 
ref>,<'day'|'hour'|'minute'|'second'|'millisecond'|'week'|'month'|'year'>,<optional
 integer multiplier>) }
  * The integer multiplier is optional and is used to do rollups to a partial 
time unit (i.e. 10 minute rollup)
  * The function returns a {@link org.apache.phoenix.schema.types.PDate}
 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/StddevPopFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/StddevPopFunction.java
index 338031b..e6e79fc 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/StddevPopFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/StddevPopFunction.java
@@ -34,7 +34,7 @@ import org.apache.phoenix.schema.types.PDataType;
 
 /**
  * 
- * Built-in function for STDDEV_POP(<expression>) aggregate function
+ * Built-in function for {@code STDDEV_POP(<expression>) } aggregate function
  * 
  * 
  * @since 1.2.1
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/StddevSampFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/StddevSampFunction.java
index 0f22c75..16d27ad 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/StddevSampFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/StddevSampFunction.java
@@ -34,7 +34,7 @@ import org.apache.phoenix.schema.types.PDataType;
 
 /**
  * 
- * Built-in function for STDDEV_SAMP(<expression>) aggregate function
+ * Built-in function for {@code STDDEV_SAMP(<expression>) } aggregate function
  * 
  * 
  * @since 1.2.1
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SubstrFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SubstrFunction.java
index 0d6d1c95..7cf32e0 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SubstrFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SubstrFunction.java
@@ -37,13 +37,13 @@ import org.apache.phoenix.util.StringUtil;
 
 /**
  * 
- * Implementation of the SUBSTR(<string>,<offset>[,<length>]) built-in function
- * where <offset> is the offset from the start of <string>. A positive offset
+ * Implementation of the {@code SUBSTR(<string>,<offset>[,<length>]) } 
built-in function
+ * where  {@code <offset> } is the offset from the start of {@code  <string> 
}. A positive offset
  * is treated as 1-based, a zero offset is treated as 0-based, and a negative
  * offset starts from the end of the string working backwards. The optional
- * <length> argument is the number of characters to return. In the absence of 
the
- * <length> argument, the rest of the string starting from <offset> is 
returned.
- * If <length> is less than 1, null is returned.
+ * {@code <length> } argument is the number of characters to return. In the 
absence of the
+ * {@code <length> }  argument, the rest of the string starting from {@code 
<offset> } is returned.
+ * If {@code <length> }  is less than 1, null is returned.
  *
  * 
  * @since 0.1
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/TrimFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/TrimFunction.java
index 12b53c7..2565445 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/TrimFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/TrimFunction.java
@@ -33,7 +33,7 @@ import org.apache.phoenix.util.StringUtil;
 
 
 /**
- * Implementation of the Trim(<string>) build-in function. It removes from 
both end of <string>
+ * Implementation of the {@code Trim(<string>) } build-in function. It removes 
from both end of {@code <string> }
  * space character and other function bytes in single byte utf8 characters set.
  * 
  * 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/TruncFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/TruncFunction.java
index 83297a1..6d223a7 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/TruncFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/TruncFunction.java
@@ -34,7 +34,7 @@ import org.apache.phoenix.schema.types.PVarchar;
  * 
  * Function used to bucketize date/time values by truncating them to
  * an even increment.  Usage:
- * TRUNC(<date/time col 
ref>,<'day'|'hour'|'minute'|'second'|'millisecond'>,[<optional integer 
multiplier>])
+ * {@code TRUNC(<date/time col 
ref>,<'day'|'hour'|'minute'|'second'|'millisecond'>,[<optional integer 
multiplier>]) }
  * The integer multiplier is optional and is used to do rollups to a partial 
time unit (i.e. 10 minute rollup)
  * The function returns a {@link org.apache.phoenix.schema.types.PDate}
  *
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index 14e900c..8b04faf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -97,7 +97,7 @@ import 
org.apache.phoenix.thirdparty.com.google.common.collect.Multimap;
  * If the WAL is enabled, these updates are then added to the WALEdit and 
attempted to be written to
  * the WAL after the WALEdit has been saved. If any of the index updates fail, 
this server is
  * immediately terminated and we rely on WAL replay to attempt the index 
updates again (see
- * {@link #preWALRestore(ObserverContext, HRegionInfo, HLogKey, WALEdit)}).
+ * #preWALRestore(ObserverContext, HRegionInfo, HLogKey, WALEdit)).
  * <p>
  * If the WAL is disabled, the updates are attempted immediately. No 
consistency guarantees are made
  * if the WAL is disabled - some or none of the index updates may be 
successful. All updates in a
@@ -731,8 +731,8 @@ public class Indexer implements RegionObserver, 
RegionCoprocessor {
     /**
      * Validate that the version and configuration parameters are supported
      * @param hbaseVersion current version of HBase on which <tt>this</tt> 
coprocessor is installed
-     * @param conf configuration to check for allowed parameters (e.g. WAL 
Compression only if >=
-     *            0.94.9)
+     * @param conf configuration to check for allowed parameters (e.g. WAL 
Compression only {@code if >=
+     *            0.94.9) }
      * @return <tt>null</tt> if the version is supported, the error message to 
display otherwise
      */
     public static String validateVersion(String hbaseVersion, Configuration 
conf) {
@@ -757,11 +757,11 @@ public class Indexer implements RegionObserver, 
RegionCoprocessor {
 
   /**
    * Enable indexing on the given table
-   * @param desc {@link TableDescriptor} for the table on which indexing 
should be enabled
- * @param builder class to use when building the index for this table
- * @param properties map of custom configuration options to make available to 
your
+   * @param descBuilder {@link TableDescriptor} for the table on which 
indexing should be enabled
+   * @param builder class to use when building the index for this table
+   * @param properties map of custom configuration options to make available 
to your
    *          {@link IndexBuilder} on the server-side
- * @param priority TODO
+   * @param priority TODO
    * @throws IOException the Indexer coprocessor cannot be added
    */
   public static void enableIndexing(TableDescriptorBuilder descBuilder, 
Class<? extends IndexBuilder> builder,
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java
index 5571512..488c922 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java
@@ -83,8 +83,6 @@ public abstract class BaseIndexBuilder implements 
IndexBuilder {
      * By default, we always attempt to index the mutation. Commonly this can 
be slow (because the framework spends the
      * time to do the indexing, only to realize that you don't need it) or not 
ideal (if you want to turn on/off
      * indexing on a table without completely reloading it).
-     * 
-     * @throws IOException
      */
     @Override
     public boolean isEnabled(Mutation m) {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuilder.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuilder.java
index 7bce22e..5cc9aa0 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuilder.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuilder.java
@@ -70,13 +70,13 @@ public interface IndexBuilder extends Stoppable {
    * called concurrently for different mutations, which may or may not be part 
of the same batch.
    * @param mutation update to the primary table to be indexed.
    * @param context index meta data for the mutation
-   * @return a Map of the mutations to make -> target index table name
+   * @return a Map of the mutations to {@code make -> target } index table name
    * @throws IOException on failure
    */
   public Collection<Pair<Mutation, byte[]>> getIndexUpdate(Mutation mutation, 
IndexMetaData context, LocalHBaseState localHBaseState) throws IOException;
 
     /**
-     * Build an index update to cleanup the index when we remove {@link 
KeyValue}s via the normal flush or compaction
+     * Build an index update to clean up the index when we remove {@code KeyValue}s 
via the normal flush or compaction
      * mechanisms. Currently not implemented by any implementors nor called, 
but left here to be implemented if we
      * ever need it. In Jesse's words:
      * 
@@ -89,11 +89,11 @@ public interface IndexBuilder extends Stoppable {
      * b/c its covered by the delete marker, but an older timestamp based read 
would actually show the index row, even
      * after the primary table row is gone due to MAX_VERSIONS requirement.
      *  
-     * @param filtered {@link KeyValue}s that previously existed, but won't be 
included
+     * @param filtered {@code KeyValue}s that previously existed, but won't be 
included
      * in further output from HBase.
      * @param context TODO
      * 
-     * @return a {@link Map} of the mutations to make -> target index table 
name
+     * @return a {@link Map} of the mutations to {@code make -> target } index 
table name
      * @throws IOException on failure
      */
   public Collection<Pair<Mutation, byte[]>> getIndexUpdateForFilteredRows(
@@ -131,7 +131,6 @@ public interface IndexBuilder extends Stoppable {
    * @param m mutation that should be indexed.
    * @return <tt>true</tt> if indexing is enabled for the given table. This 
should be on a per-table
    *         basis, as each codec is instantiated per-region.
- * @throws IOException 
    */
   public boolean isEnabled(Mutation m);
   
@@ -139,7 +138,6 @@ public interface IndexBuilder extends Stoppable {
    * True if mutation has an ON DUPLICATE KEY clause
    * @param m mutation
    * @return true if mutation has ON DUPLICATE KEY expression and false 
otherwise.
-   * @throws IOException
    */
   public boolean isAtomicOp(Mutation m);
 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/IndexCodec.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/IndexCodec.java
index 6762a30..abcb347 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/IndexCodec.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/IndexCodec.java
@@ -80,7 +80,6 @@ public interface IndexCodec {
      *            mutation that should be indexed.
      * @return <tt>true</tt> if indexing is enabled for the given table. This 
should be on a per-table basis, as each
      *         codec is instantiated per-region.
-     * @throws IOException
      */
     public boolean isEnabled(Mutation m);
 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java
index bf592ba..474ff38 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java
@@ -125,7 +125,7 @@ public class LocalTableState implements TableState {
      * state for any of the columns you are indexing.
      * <p>
      * <i>NOTE:</i> This method should <b>not</b> be used during
-     * {@link IndexCodec#getIndexDeletes(TableState, BatchState, byte[], 
byte[])} as the pending update will not yet have been
+     * IndexCodec#getIndexDeletes(TableState, BatchState, byte[], byte[]) as 
the pending update will not yet have been
      * applied - you are merely attempting to cleanup the current state and 
therefore do <i>not</i>
      * need to track the indexed columns.
      * <p>
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
index e38e0b1..cfaa78e 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
@@ -52,7 +52,7 @@ import org.slf4j.LoggerFactory;
  *  <li>ignoring memstore timestamps in favor of deciding when we want to 
overwrite keys based on how
  *    we obtain them</li>
  *   <li>ignoring time range updates (so 
- *    {@link ReseekableScanner#shouldUseScanner(Scan, SortedSet, long)} isn't 
supported from
+ *    ReseekableScanner#shouldUseScanner(Scan, SortedSet, long) isn't 
supported from
  *    {@link #getScanner()}).</li>
  * </ol>
  * <p>
@@ -67,7 +67,7 @@ import org.slf4j.LoggerFactory;
  * the previous implementation. Further, by being smart about how we manage 
the KVs, we can drop the
  * extra object creation we were doing to wrap the pending KVs (which we did 
previously to ensure
  * they sorted before the ones we got from the HRegion). We overwrite {@link 
KeyValue}s when we add
- * them from external sources {@link #add(KeyValue, boolean)}, but then don't 
overwrite existing
+ * them from external sources #add(KeyValue, boolean), but then don't 
overwrite existing
  * keyvalues when read them from the underlying table (because pending 
keyvalues should always
  * overwrite current ones) - this logic is all contained in LocalTableState.
  * @see LocalTableState
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/NewerTimestampFilter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/NewerTimestampFilter.java
index b67aad6..d8f7c8e 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/NewerTimestampFilter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/NewerTimestampFilter.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.hbase.filter.FilterBase;
 
 /**
  * Server-side only class used in the indexer to filter out keyvalues newer 
than a given timestamp
- * (so allows anything <code><=</code> timestamp through).
+ * (so allows anything {@code <= } timestamp through).
  * <p>
  */
 public class NewerTimestampFilter extends FilterBase {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/SingleIndexWriteFailureException.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/SingleIndexWriteFailureException.java
index 368e3ce..e58895c 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/SingleIndexWriteFailureException.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/SingleIndexWriteFailureException.java
@@ -60,7 +60,7 @@ public class SingleIndexWriteFailureException extends 
IndexWriteException {
   /**
    * This constructor used to rematerialize this exception when receiving
    * an rpc exception from the server
-   * @param message detail message
+   * @param msg detail message
    */
   public SingleIndexWriteFailureException(String msg) {
       super(IndexWriteException.parseDisableIndexOnFailure(msg));
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
index 4bc6b27..528b44f 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
@@ -52,7 +52,7 @@ public class FilteredKeyValueScanner implements 
ReseekableScanner {
     }
 
     /**
-     * Same a {@link KeyValueScanner#next()} except that we filter out the 
next {@link KeyValue} until we find one that
+     * Same as KeyValueScanner#next() except that we filter out the next {@link 
KeyValue} until we find one that
      * passes the filter.
      *
      * @return the next {@link KeyValue} or <tt>null</tt> if no next {@link 
KeyValue} is present and passes all the
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
index 8b7adcf..ab57591 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
@@ -191,7 +191,7 @@ public class IndexManagementUtil {
      * Propagate the given failure as a generic {@link IOException}, if it 
isn't already
      * 
      * @param e
-     *            reason indexing failed. If ,tt>null</tt>, throws a {@link 
NullPointerException}, which should unload
+     *            reason indexing failed. If <tt>null</tt>, throws a {@link 
NullPointerException}, which should unload
      *            the coprocessor.
      */
     public static void rethrowIndexingException(Throwable e) throws 
IOException {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/KeyValueCodec.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/KeyValueCodec.java
index 0f8a7a0..8b8949e 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/KeyValueCodec.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/KeyValueCodec.java
@@ -31,7 +31,7 @@ import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 /**
- * Codec to encode/decode {@link KeyValue}s and {@link IndexedKeyValue}s 
within a {@link WALEdit}
+ * Codec to encode/decode {@code KeyValue}s and {@code IndexedKeyValue}s within 
a {@code WALEdit}
  */
 public class KeyValueCodec {
 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexFailurePolicy.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexFailurePolicy.java
index c23252a..5a7792e 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexFailurePolicy.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexFailurePolicy.java
@@ -34,9 +34,9 @@ public interface IndexFailurePolicy extends Stoppable {
 
   /**
    * Handle the failure of the attempted index updates
-   * @param attempted map of index table -> mutations to apply
+   * @param attempted map of index {@code table -> mutations } to apply
    * @param cause reason why there was a failure
- * @throws IOException 
+   * @throws IOException 
    */
   public void
       handleFailure(Multimap<HTableInterfaceReference, Mutation> attempted, 
Exception cause) throws IOException;
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java
index 23dc4f7..1676669 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java
@@ -133,7 +133,7 @@ public class IndexWriter implements Stoppable {
   }
 
   /**
-   * see {@link #writeAndHandleFailure(Collection)}.
+   * see #writeAndHandleFailure(Collection).
    * @param toWrite
    * @throws IOException
    */
@@ -194,7 +194,7 @@ public class IndexWriter implements Stoppable {
     }
     
     /**
-   * see {@link #write(Collection)}
+   * see #write(Collection)
    * @param toWrite
    * @throws IndexWriteException
    */
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
index 73110a2..4dbe579 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
@@ -42,7 +42,7 @@ public class IndexWriterUtils {
 
   /**
    * Maximum number of threads to allow per-table when writing. Each writer 
thread (from
-   * {@link IndexWriterUtils#NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY}) 
has a single HTable.
+   * IndexWriterUtils#NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY) has a 
single HTable.
    * However, each table is backed by a threadpool to manage the updates to 
that table. this
    * specifies the number of threads to allow in each of those tables. 
Generally, you shouldn't need
    * to change this, unless you have a small number of indexes to which most 
of the writes go.
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
index d8e1bae..1b6661d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
@@ -56,7 +56,7 @@ import static 
org.apache.phoenix.util.ServerUtil.wrapInDoNotRetryIOException;
  * you need to at least attempt all writes and know their result; for 
instance, this is fine for doing WAL recovery -
  * it's not a performance intensive situation and we want to limit the the 
edits we need to retry.
  * <p>
- * On failure to {@link #write(Multimap)}, we return a {@link 
MultiIndexWriteFailureException} that contains the list of
+ * On failure to #write(Multimap), we return a MultiIndexWriteFailureException 
that contains the list of
  * {@link HTableInterfaceReference} that didn't complete successfully.
  * <p>
  * Failures to write to the index can happen several different ways:
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/PerRegionIndexWriteCache.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/PerRegionIndexWriteCache.java
index 3c0ccf0..6d52e6d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/PerRegionIndexWriteCache.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/PerRegionIndexWriteCache.java
@@ -38,7 +38,7 @@ public class PerRegionIndexWriteCache {
 
   /**
    * Get the edits for the current region. Removes the edits from the cache. 
To add them back, call
-   * {@link #addEdits(HRegion, HTableInterfaceReference, Collection)}.
+   * #addEdits(HRegion, HTableInterfaceReference, Collection).
    * @param region
    * @return Get the edits for the given region. Returns <tt>null</tt> if 
there are no pending edits
    *         for the region
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java 
b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
index 7ba8862..37e9626 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
@@ -124,7 +124,7 @@ import 
org.apache.phoenix.thirdparty.com.google.common.collect.Sets;
  * 
  * Class that builds index row key from data row key and current state of
  * row and caches any covered columns. Client-side serializes into byte array 
using 
- * @link #serialize(PTable, ImmutableBytesWritable)}
+ * #serialize(PTable, ImmutableBytesWritable)
  * and transmits to server-side through either the 
  * {@link org.apache.phoenix.index.PhoenixIndexCodec#INDEX_PROTO_MD}
  * Mutation attribute or as a separate RPC call using 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMetaDataCacheClient.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMetaDataCacheClient.java
index bd308c0..1f6dd73 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMetaDataCacheClient.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMetaDataCacheClient.java
@@ -48,7 +48,7 @@ public class IndexMetaDataCacheClient {
      * Construct client used to send index metadata to each region server
      * for caching during batched put for secondary index maintenance.
      * @param connection the client connection
-     * @param cacheUsingTableRef table ref to table that will use the cache 
during its scan
+     * @param cacheUsingTable table ref to table that will use the cache 
during its scan
      */
     public IndexMetaDataCacheClient(PhoenixConnection connection, PTable 
cacheUsingTable) {
         serverCache = new ServerCacheClient(connection);
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java 
b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
index 746d4a5..7cef8dc 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
@@ -286,7 +286,7 @@ public class PhoenixIndexBuilder extends NonTxIndexBuilder {
      *       UPSERT VALUES ON DUPLICATE KEY IGNORE followed by UPSERT VALUES 
ON DUPLICATE KEY UPDATE
      * 2) Short value tracking how many times the next first clause should be 
executed. This
      *    optimizes the same clause be executed many times by only serializing 
it once.
-     * 3) Repeating {List<Expression>, PTable} pairs that encapsulate the ON 
DUPLICATE KEY clause.
+     * 3) Repeating {@code List<Expression>, PTable } pairs that encapsulate 
the ON DUPLICATE KEY clause.
      * @param table table representing columns being updated
      * @param expressions list of expressions to evaluate for updating columns
      * @return serialized byte array representation of ON DUPLICATE KEY UPDATE 
info
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
index 48ea449..1e37c89 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
@@ -377,11 +377,8 @@ public class PhoenixIndexFailurePolicy extends 
DelegateIndexFailurePolicy {
 
     /**
      * Check config for whether to disable index on index write failures
-     * @param htd
-     * @param config
-     * @param connection
-     * @return The table config for {@link 
PhoenixIndexFailurePolicy.DISABLE_INDEX_ON_WRITE_FAILURE}
-     * @throws SQLException
+     * @param env region coprocessor environment used to read the table descriptor
+     * @return whether the index should be disabled on write failures
      */
     public static boolean 
getDisableIndexOnFailure(RegionCoprocessorEnvironment env) {
         TableDescriptor htd = env.getRegion().getTableDescriptor();
@@ -460,7 +457,7 @@ public class PhoenixIndexFailurePolicy extends 
DelegateIndexFailurePolicy {
      * @param iwe original IndexWriteException
      * @param connection connection to use
      * @param config config used to get retry settings
-     * @throws Exception
+     * @throws IOException
      */
     public static void doBatchWithRetries(MutateCommand mutateCommand,
             IndexWriteException iwe, PhoenixConnection connection, 
ReadOnlyProps config)
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
index 21264ac..7ec8ebd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
@@ -50,7 +50,7 @@ import 
org.apache.phoenix.thirdparty.com.google.common.util.concurrent.ThreadFac
  * 
  * JDBC Driver implementation of Phoenix for production.
  * To use this driver, specify the following URL:
- *     jdbc:phoenix:<zookeeper quorum server name>;
+ *     {@code jdbc:phoenix:<zookeeper quorum server name>; }
  * Only an embedded driver is currently supported (Phoenix client
  * runs in the same JVM as the driver). Connections are lightweight
  * and are not pooled. The last part of the URL, the hbase zookeeper
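A minimal connection sketch using the URL form shown above ("localhost" stands in for the actual ZooKeeper quorum):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class PhoenixConnect {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement();
                 ResultSet rs = stmt.executeQuery(
                         "SELECT TABLE_NAME FROM SYSTEM.CATALOG LIMIT 5")) {
                while (rs.next()) {
                    System.out.println(rs.getString(1));
                }
            }
        }
    }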
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index b960074..9be11d2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -1758,7 +1758,7 @@ public class PhoenixStatement implements Statement, 
SQLCloseable {
 
     /**
      * Execute the current batch of statements. If any exception occurs
-     * during execution, a {@link 
org.apache.phoenix.exception.BatchUpdateException}
+     * during execution, a org.apache.phoenix.exception.BatchUpdateException
      * is thrown which includes the index of the statement within the
      * batch when the exception occurred.
      */
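For context, a sketch of driving that batch path from JDBC; it catches the generic java.sql.SQLException rather than the Phoenix-specific class, and the table T(ID, V) is invented for the example:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class BatchSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                stmt.addBatch("UPSERT INTO T(ID, V) VALUES (1, 'a')");
                stmt.addBatch("UPSERT INTO T(ID, V) VALUES (2, 'b')");
                try {
                    stmt.executeBatch();
                    conn.commit();
                } catch (SQLException e) {
                    // The thrown exception reports which statement in the batch failed.
                    System.err.println("batch failed: " + e.getMessage());
                }
            }
        }
    }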
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheClient.java 
b/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheClient.java
index ebdcefb..fad3828 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheClient.java
@@ -85,7 +85,16 @@ public class HashCacheClient  {
      * Send the results of scanning through the scanner to all
      * region servers for regions of the table that will use the cache
      * that intersect with the minMaxKeyRange.
-     * @param scanner scanner for the table or intermediate results being 
cached
+     * @param keyRanges
+     * @param cacheId
+     * @param iterator
+     * @param estimatedSize
+     * @param onExpressions
+     * @param singleValueOnly
+     * @param usePersistentCache
+     * @param cacheUsingTable
+     * @param keyRangeRhsExpression
+     * @param keyRangeRhsValues
      * @return client-side {@link ServerCache} representing the added hash 
cache
      * @throws SQLException 
      * @throws MaxServerCacheSizeExceededException if size of hash cache 
exceeds max allowed
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapper.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapper.java
index 8c28c3c..a951939 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapper.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapper.java
@@ -36,7 +36,7 @@ import 
org.apache.phoenix.thirdparty.com.google.common.collect.Iterables;
 
 /**
  * MapReduce mapper that converts CSV input lines into KeyValues that can be 
written to HFiles.
- * <p/>
+ * 
  * KeyValues are produced by executing UPSERT statements on a Phoenix 
connection and then
  * extracting the created KeyValues and rolling back the statement execution 
before it is
  * committed to HBase.
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/ImportPreUpsertKeyValueProcessor.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/ImportPreUpsertKeyValueProcessor.java
index aa76572..dccbe7a 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/ImportPreUpsertKeyValueProcessor.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/ImportPreUpsertKeyValueProcessor.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.Cell;
  * Implementing this interface and configuring it via the {@link
  * 
org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil#UPSERT_HOOK_CLASS_CONFKEY}
  * configuration key.
- * <p/>
+ * 
  * The intention of such a hook is to allow coproccessor-style operations to 
be peformed on
  * data that is being bulk-loaded via MapReduce.
  */
@@ -35,7 +35,7 @@ public interface ImportPreUpsertKeyValueProcessor {
     /**
      * Process a list of KeyValues before they are written to an HFile. The 
supplied list of
      * KeyValues contain all data that is to be written for a single Phoenix 
row.
-     * <p/>
+     * 
      * Implementors can filter certain KeyValues from the list, augment the 
list, or return the
      * same list.
      *
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/JsonToKeyValueMapper.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/JsonToKeyValueMapper.java
index e8e1b18..fd4bb34 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/JsonToKeyValueMapper.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/JsonToKeyValueMapper.java
@@ -32,7 +32,7 @@ import 
org.apache.phoenix.thirdparty.com.google.common.base.Preconditions;
 
 /**
  * MapReduce mapper that converts JSON input lines into KeyValues that can be 
written to HFiles.
- * <p/>
+ * 
  * KeyValues are produced by executing UPSERT statements on a Phoenix 
connection and then
  * extracting the created KeyValues and rolling back the statement execution 
before it is
  * committed to HBase.
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
index a8e424a..be20cc6 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
@@ -160,7 +160,6 @@ public class PhoenixInputFormat<T extends DBWritable> 
extends InputFormat<NullWr
      * @param context
      * @return
      * @throws IOException
-     * @throws SQLException
      */
     protected  QueryPlan getQueryPlan(final JobContext context, final 
Configuration configuration)
             throws IOException {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputSplit.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputSplit.java
index 0e2411e..7819c53 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputSplit.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputSplit.java
@@ -51,7 +51,7 @@ public class PhoenixInputSplit extends InputSplit implements 
Writable {
     
    /**
     * 
-    * @param keyRange
+    * @param scans
     */
     public PhoenixInputSplit(final List<Scan> scans) {
         this(scans, 0, null);
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java
index b2a0fde..3621c99 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java
@@ -39,7 +39,7 @@ import 
org.apache.phoenix.thirdparty.com.google.common.base.Preconditions;
 
 /**
  * MapReduce mapper that converts input lines into KeyValues based on the 
Regex that can be written to HFiles.
- * <p/>
+ * 
  * KeyValues are produced by executing UPSERT statements on a Phoenix 
connection and then
  * extracting the created KeyValues and rolling back the statement execution 
before it is
  * committed to HBase.
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MutationMetricQueue.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MutationMetricQueue.java
index 8387b2c..9efb706 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MutationMetricQueue.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MutationMetricQueue.java
@@ -55,7 +55,7 @@ public class MutationMetricQueue {
     
     /**
      * Publish the metrics to wherever you want them published. The internal 
state is cleared out after every publish.
-     * @return map of table name -> list of pair of (metric name, metric value)
+     * @return map of table name to a list of (metric name, metric value) pairs
      */
     public Map<String, Map<MetricType, Long>> aggregate() {
         Map<String, Map<MetricType, Long>> publishedMetrics = new HashMap<>();
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/ReadMetricQueue.java 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/ReadMetricQueue.java
index 97a2b84..4585118 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/ReadMetricQueue.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/ReadMetricQueue.java
@@ -70,7 +70,7 @@ public class ReadMetricQueue {
     }
 
     /**
-     * @return map of table name -> list of pair of (metric name, metric value)
+     * @return map of table name to a list of (metric name, metric value) pairs
      */
     public Map<String, Map<MetricType, Long>> aggregate() {
         Map<String, Map<MetricType, Long>> publishedMetrics = new HashMap<>();
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/parse/AndRewriterBooleanParseNodeVisitor.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/parse/AndRewriterBooleanParseNodeVisitor.java
index e8c7ee2..762181e 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/parse/AndRewriterBooleanParseNodeVisitor.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/parse/AndRewriterBooleanParseNodeVisitor.java
@@ -24,7 +24,7 @@ import 
org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
 
 /**
  * Base visitor for rewrite {@link ParseNode},only further visit down for 
{@link AndParseNode}.
- * A example is {@link 
org.apache.phoenix.optimize.QueryOptimizer.WhereConditionRewriter}, which
+ * A example is 
org.apache.phoenix.optimize.QueryOptimizer.WhereConditionRewriter, which
  * rewrites columns in dataTable to columns in indexTable, and removes 
parseNodes which have
  * columns not in indexTable.
  */
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/parse/ComparisonParseNode.java 
b/phoenix-core/src/main/java/org/apache/phoenix/parse/ComparisonParseNode.java
index 51cb833..f3d2e3f 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/parse/ComparisonParseNode.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/parse/ComparisonParseNode.java
@@ -27,7 +27,7 @@ import org.apache.phoenix.util.QueryUtil;
 
 /**
  * 
- * Common base class for =, >, >=, <, <=, !=
+ * Common base class for {@code =, >, >=, <, <=, != }
  *
  * 
  * @since 0.1
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/parse/GreaterThanOrEqualParseNode.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/parse/GreaterThanOrEqualParseNode.java
index 6e0337a..b1cb994 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/parse/GreaterThanOrEqualParseNode.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/parse/GreaterThanOrEqualParseNode.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 
 /**
  * 
- * Node representing the greater than or equal to operator (>=) in SQL
+ * Node representing the greater than or equal to operator {@code (>=) } in SQL
  *
  * 
  * @since 0.1
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/parse/GreaterThanParseNode.java 
b/phoenix-core/src/main/java/org/apache/phoenix/parse/GreaterThanParseNode.java
index d6b380c..ca4ae0a 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/parse/GreaterThanParseNode.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/parse/GreaterThanParseNode.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 
 /**
  * 
- * Node representing the greater than operator (>) in SQL
+ * Node representing the greater than operator {@code (>) } in SQL
  *
  * 
  * @since 0.1
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java 
b/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java
index 1bc072e..60334ac 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java
@@ -58,7 +58,7 @@ public class HintNode {
         */
        NO_INDEX,
        /**
-       * Hint of the form INDEX(<table_name> <index_name>...)
+       * Hint of the form {@code INDEX(<table_name> <index_name>...) }
        * to suggest usage of the index if possible. The first
        * usable index in the list of indexes will be choosen.
        * Table and index names may be surrounded by double quotes
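As an illustration of the hint form above, a hedged query sketch (MY_TABLE, MY_IDX, and the columns are made-up names):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;

    public class HintSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 PreparedStatement ps = conn.prepareStatement(
                         // Asks the optimizer to prefer MY_IDX when scanning MY_TABLE.
                         "SELECT /*+ INDEX(MY_TABLE MY_IDX) */ COL1 FROM MY_TABLE WHERE COL2 = ?")) {
                ps.setString(1, "value");
                try (ResultSet rs = ps.executeQuery()) {
                    while (rs.next()) {
                        System.out.println(rs.getString(1));
                    }
                }
            }
        }
    }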
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/parse/LessThanOrEqualParseNode.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/parse/LessThanOrEqualParseNode.java
index 345a8bd..18aa349 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/parse/LessThanOrEqualParseNode.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/parse/LessThanOrEqualParseNode.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 
 /**
  * 
- * Node representing the less than or equal to operator (<=) in SQL
+ * Node representing the less than or equal to operator {@code (<=) } in SQL
  *
  * 
  * @since 0.1
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/parse/LessThanParseNode.java 
b/phoenix-core/src/main/java/org/apache/phoenix/parse/LessThanParseNode.java
index 9472d97..e6e86a2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/LessThanParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/LessThanParseNode.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 
 /**
  * 
- * Node representing the less than operator (<) in SQL
+ * Node representing the less than operator {@code (<) } in SQL
  *
  * 
  * @since 0.1
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/parse/NotEqualParseNode.java 
b/phoenix-core/src/main/java/org/apache/phoenix/parse/NotEqualParseNode.java
index 8d95b51..9114aca 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/NotEqualParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/NotEqualParseNode.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 
 /**
  * 
- * Node representing a not equal expression (!=,<>) in SQL
+ * Node representing a not equal expression {@code (!=,<>) } in SQL
  *
  * 
  * @since 0.1
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java 
b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
index 3c84bc0..e1f365e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
@@ -84,7 +84,7 @@ public class ParseNodeFactory {
      *
      * Key used to look up a built-in function using the combination of
      * the lowercase name and the number of arguments. This disambiguates
-     * the aggregate MAX(<col>) from the non aggregate MAX(<col1>,<col2>).
+     * the aggregate {@code MAX(<col>) } from the non aggregate {@code 
MAX(<col1>,<col2>) }.
      *
      * 
      * @since 0.1
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeRewriter.java 
b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeRewriter.java
index 74795b3..02de80f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeRewriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeRewriter.java
@@ -58,9 +58,9 @@ public class ParseNodeRewriter extends 
TraverseAllParseNodeVisitor<ParseNode> {
      * <pre>
      * Resolve the inner alias for the selectStament.
      * For following sql:
-     *   select aid,sum(age) agesum from merge where age >=11 and age<=33 
group by aid order by agesum
+     *   {@code select aid,sum(age) agesum from merge where age >=11 and age 
<= 33 group by aid order by agesum }
      * "agesum" is an alias of "sum(age)", so for this method, the above sql 
is rewritten to:
-     *   select aid,sum(age) agesum from merge where age >=11 and age<=33 
group by aid order by sum(age)
+     *   {@code select aid,sum(age) agesum from merge where age >= 11 and age 
<= 33 group by aid order by sum(age) }
      * </pre>
      * @param selectStament
      * @param phoenixConnection
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/parse/ShowTablesStatement.java 
b/phoenix-core/src/main/java/org/apache/phoenix/parse/ShowTablesStatement.java
index c9a8d0a..0371a45 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/parse/ShowTablesStatement.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/parse/ShowTablesStatement.java
@@ -25,7 +25,7 @@ import javax.annotation.Nullable;
 import java.util.Objects;
 
 /**
- * ParseNode implementation for SHOW TABLES [IN <schema>].
+ * ParseNode implementation for SHOW TABLES {@code [IN <schema>] }.
  */
 public class ShowTablesStatement extends ShowStatement {
     // Schema for table listing. null implies the the db for this connection 
is used.
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
index 6d1c1a4..102159d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
@@ -165,7 +165,7 @@ public interface ConnectionQueryServices extends 
QueryServices, MetaDataMutated
     /**
      * Removes cache {@link GuidePostsInfo} for the table with the given name. 
If no cached guideposts are present, this does nothing.
      *
-     * @param tableName The table to remove stats for
+     * @param key the guideposts key of the table to remove stats for
      */
     void invalidateStats(GuidePostsKey key);
     
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 55ba683..597ae27 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -404,7 +404,6 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
      * @param services base services from where we derive our default 
configuration
      * @param connectionInfo to provide connection information
      * @param info hbase configuration properties
-     * @throws SQLException
      */
     public ConnectionQueryServicesImpl(QueryServices services, ConnectionInfo 
connectionInfo, Properties info) {
         super(services);
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
index 382af01..17562d6 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
@@ -739,8 +739,8 @@ public class ConnectionlessQueryServicesImpl extends 
DelegateQueryServices imple
      * Manually adds {@link GuidePostsInfo} for a table to the client-side 
cache. Not a
      * {@link ConnectionQueryServices} method. Exposed for testing purposes.
      *
-     * @param tableName Table name
-     * @param stats Stats instance
+     * @param key the guideposts key for the table
+     * @param info the guideposts stats instance to cache
      */
     public void addTableStats(GuidePostsKey key, GuidePostsInfo info) {
         this.guidePostsCache.put(Objects.requireNonNull(key), info);
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCacheImpl.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCacheImpl.java
index e19d630..0b7a4bf 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCacheImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCacheImpl.java
@@ -82,7 +82,7 @@ public class GuidePostsCacheImpl implements GuidePostsCache {
     }
 
     /**
-     * Returns the PTableStats for the given <code>tableName</code, using the 
provided
+     * Returns the PTableStats for the given <code>tableName</code>, using the 
provided
      * <code>valueLoader</code> if no such mapping exists.
      *
      * @see com.google.common.cache.LoadingCache#get(Object)
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/HBaseFactoryProvider.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/HBaseFactoryProvider.java
index c04db02..780dbfd 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/HBaseFactoryProvider.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/HBaseFactoryProvider.java
@@ -21,7 +21,7 @@ import org.apache.phoenix.util.InstanceResolver;
 
 /**
  * Manages factories that provide extension points for HBase.
- * <p/>
+ * 
  * Dependent modules may register their own implementations of the following 
using {@link java.util.ServiceLoader}:
  * <ul>
  *     <li>{@link ConfigurationFactory}</li>
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/HConnectionFactory.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/HConnectionFactory.java
index 0912c2c..c3e0eb5 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/HConnectionFactory.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/HConnectionFactory.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 
 /**
- * Factory for creating {@link HConnection}
+ * Factory for creating HConnection
  *
  * 
  */
@@ -33,7 +33,7 @@ public interface HConnectionFactory {
     /**
      * Creates HConnection to access HBase clusters.
      * 
-     * @param configuration object
+     * @param conf the HBase configuration used to create the connection
      * @return A HConnection instance
      */
     Connection createConnection(Configuration conf) throws IOException;
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/PropertyPolicy.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/PropertyPolicy.java
index a59f912..239b89f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/PropertyPolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/PropertyPolicy.java
@@ -27,12 +27,13 @@ import java.util.Properties;
  * with exception
  *
  * An example will be:
- *<code>
+ *<pre>
+ *{@code 
  *public class Customized PropertyPolicy implements PropertyPolicy {
  *  final static Set<String> propertiesKeyAllowed = 
Collections.unmodifiableSet(
  *      new 
HashSet<>(asList("DriverMajorVersion","DriverMinorVersion","DriverName","CurrentSCN")));
  *
- *      @Override public void evaluate(Properties properties) {
+ *      public void evaluate(Properties properties) {
  *      final Set<String> offendingProperties = new HashSet<>();
  *
  *      for(Object k:properties.keySet()){
@@ -44,7 +45,8 @@ import java.util.Properties;
  *      "properties not allowed. offending properties" + offendingProperties);
  *  }
  *}
- *</code>
+ *}
+ *</pre>
  */
 public interface PropertyPolicy {
     /**
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/PropertyPolicyProvider.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/PropertyPolicyProvider.java
index 27cb2d3..4ba4c56 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/PropertyPolicyProvider.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/PropertyPolicyProvider.java
@@ -21,7 +21,7 @@ import org.apache.phoenix.util.InstanceResolver;
 
 /**
  * Manages providers that provide property policy for Phoenix.
- * <p/>
+ * 
  * Dependent modules may register their own implementations of the following 
using {@link java.util.ServiceLoader}:
  * <ul>
  *     <li>{@link PropertyPolicy}</li>
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 7e4d8b0..c869f2b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -71,7 +71,7 @@ public interface QueryServices extends SQLCloseable {
     /**
         * max size to spool the the result into
         * ${java.io.tmpdir}/ResultSpoolerXXX.bin if
-        * {@link QueryServices#SPOOL_THRESHOLD_BYTES_ATTRIB } is reached.
+        * QueryServices#SPOOL_THRESHOLD_BYTES_ATTRIB is reached.
         * <p>
         * default is unlimited(-1)
         * <p>
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/ColumnModifier.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/ColumnModifier.java
index a096855..fdd55b1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/ColumnModifier.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/ColumnModifier.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions;
 
 /**
- * A ColumnModifier implementation modifies how bytes are stored in a primary 
key column.</p>  
+ * A ColumnModifier implementation modifies how bytes are stored in a primary 
key column.
  * The {@link ColumnModifier#apply apply} method is called when the bytes for 
a specific column are first written to HBase and again
  * when they are read back.  Phoenix attemps to minimize calls to apply when 
bytes are read out of HBase.   
  * 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/SortOrder.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/SortOrder.java
index 5aec70e..895b6ef 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/SortOrder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/SortOrder.java
@@ -22,27 +22,27 @@ import 
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions;
 
 /**
- * Specifies the sort order on disk of row key columns. The default is ASC.</p>
+ * Specifies the sort order on disk of row key columns. The default is ASC.
  * 
  * HBase always stores row keys in ascending order resulting in scans to also 
be
  * sorted by ascending row keys. This enum is used to associate a sort order
  * with each row key column to allow storing row key columns in descending
- * order.</p>
+ * order.
  * 
  * The often cited example of when you may want to do this is a row key that 
has
  * a date component. If all other parts of the row key are equal, a scan would
  * return the data from least recent to most recent; to get the scan to return
  * the most recent data first, the time component of the row key can be marked
- * as "desc".</p>
+ * as "desc".
  * 
  * Internally, the bits of values for columns marked as "desc" are inverted 
before handing
  * them to HBase to persist; they are inverted again when read back out.
- * </p>
  * 
- * Example DDL:</p>
+ * 
+ * Example DDL:
  * 
  * CREATE TABLE Events(event_type INTEGER NOT NULL, event_date DATE NOT NULL, 
event_name VARCHAR NOT NULL 
- * CONSTRAINT PK PRIMARY KEY (event_type, event_date DESC))</p>
+ * CONSTRAINT PK PRIMARY KEY (event_type, event_date DESC))
  * 
  * @since 1.2
  */
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
index 6c82900..84ea65e 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
@@ -305,7 +305,6 @@ public class DefaultStatisticsCollector implements 
StatisticsCollector {
      * 
      * @param results
      *            next batch of {@link KeyValue}s
-     * @throws IOException 
      */
     @Override
     public void collectStatistics(final List<Cell> results) {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfo.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfo.java
index 0f325f4..2ae399e 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfo.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfo.java
@@ -85,7 +85,7 @@ public class GuidePostsInfo {
      *            Maximum length of a guidePost collected
      * @param guidePostsCount
      *            Number of guidePosts
-     * @param gpTimestamps
+     * @param updateTimes
      *            Times at which guidePosts were updated/created
      */
     public GuidePostsInfo(List<Long> byteCounts, ImmutableBytesWritable 
guidePosts, List<Long> rowCounts, int maxLength,
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfoBuilder.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfoBuilder.java
index 2b82936..8e0bd29 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfoBuilder.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfoBuilder.java
@@ -93,7 +93,6 @@ public class GuidePostsInfoBuilder {
      * @param row number of rows in the guidepost
      * @param byteCount number of bytes in the guidepost
      * @param updateTimestamp time at which guidepost was created/updated.
-     * @throws IOException
      */
     public boolean trackGuidePost(ImmutableBytesWritable row, long byteCount, 
long rowCount,
             long updateTimestamp) {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
index 0fd6390..bed0ee9 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
@@ -50,7 +50,6 @@ public interface StatisticsCollector extends Closeable {
     /**
      * Collect statistics for the given list of cells. This method can be 
called multiple times
      * during collection of statistics.
-     * @throws IOException 
      */
     void collectStatistics(List<Cell> results);
 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
index bb2c22f..74eb88d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
@@ -152,7 +152,7 @@ public class StatisticsWriter implements Closeable {
     }
 
     /**
-     * Update a list of statistics for a given region. If the UPDATE 
STATISTICS <tablename> query is issued then we use
+     * Update a list of statistics for a given region. If the UPDATE 
STATISTICS {@code <tablename> } query is issued then we use
      * Upsert queries to update the table If the region gets splitted or the 
major compaction happens we update using
      * HTable.put()
      * 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
index a6dc4c3..12e8383 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
@@ -44,8 +44,8 @@ import 
org.apache.phoenix.thirdparty.com.google.common.base.Preconditions;
  * The datatype for PColummns that are Arrays. Any variable length array would 
follow the below order. Every element
  * would be seperated by a seperator byte '0'. Null elements are counted and 
once a first non null element appears we
  * write the count of the nulls prefixed with a seperator byte. Trailing nulls 
are not taken into account. The last non
- * null element is followed by two seperator bytes. For eg a, b, null, null, 
c, null -> 65 0 66 0 0 2 67 0 0 0 a null
- * null null b c null d -> 65 0 0 3 66 0 67 0 0 1 68 0 0 0. The reason we use 
this serialization format is to allow the
+ * null element is followed by two separator bytes. For example {@code a, b, 
null, null, c, null -> 65 0 66 0 0 2 67 0 0 0 } and {@code a null
+ * null null b c null d -> 65 0 0 3 66 0 67 0 0 1 68 0 0 0 }. The reason we use 
this serialization format is to allow the
  * byte array of arrays of the same type to be directly comparable against 
each other. This prevents a costly
  * deserialization on compare and allows an array column to be used as the 
last column in a primary key constraint.
  */
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java
index 21e7751..3763c5f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java
@@ -613,7 +613,7 @@ public abstract class PDataType<T> implements DataType<T>, 
Comparable<PDataType<
     /**
      * Deserialize a variable length byte array into a BigDecimal. Note that 
because of the normalization that gets done
      * to the scale, if you roundtrip a BigDecimal, it may not be equal before 
and after. However, the before and after
-     * number will always compare to be equal (i.e. 
<nBefore>.compareTo(<nAfter>) == 0)
+     * number will always compare to be equal {@code (i.e. 
<nBefore>.compareTo(<nAfter>) == 0) }
      *
      * @param bytes
      *            the bytes containing the number
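A small, Phoenix-independent illustration of the equals-versus-compareTo point made above, using plain java.math:

    import java.math.BigDecimal;

    public class ScaleDemo {
        public static void main(String[] args) {
            BigDecimal before = new BigDecimal("1.50");
            // stripTrailingZeros() stands in for the scale normalization described above.
            BigDecimal after = before.stripTrailingZeros();
            System.out.println(before.equals(after));    // false: the scales differ (1.50 vs 1.5)
            System.out.println(before.compareTo(after)); // 0: numerically equal
        }
    }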
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
index c6a48dd..14d17ea 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
@@ -167,8 +167,6 @@ public interface PhoenixTransactionContext {
      *
      * @param dataTable  the table that the DDL command works on
      * @throws SQLException
-     * @throws InterruptedException
-     * @throws TimeoutException
      */
     public void commitDDLFence(PTable dataTable)
             throws SQLException;
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionProvider.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionProvider.java
index 3af554b..f50b6b5 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionProvider.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionProvider.java
@@ -63,7 +63,6 @@ public interface PhoenixTransactionProvider {
      *  @param  timestamp - start timestamp
      *  @param  commitTimestamp - commit timestamp
      * @return put operation with metadata
-     * @throws IOException
      */
     public Put markPutAsCommitted(Put put, long timestamp, long 
commitTimestamp);
 }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/DefaultEnvironmentEdge.java b/phoenix-core/src/main/java/org/apache/phoenix/util/DefaultEnvironmentEdge.java
index 8dc63b1..6a1e075 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/DefaultEnvironmentEdge.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/DefaultEnvironmentEdge.java
@@ -24,7 +24,6 @@ package org.apache.phoenix.util;
 public class DefaultEnvironmentEdge extends EnvironmentEdge {
   /**
    * {@inheritDoc}
-   * <p/>
    * This implementation returns {@link System#currentTimeMillis()}
    */
   @Override
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ExpressionUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ExpressionUtil.java
index c6854ab..68a77c0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ExpressionUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ExpressionUtil.java
@@ -104,7 +104,7 @@ public class ExpressionUtil {
 
     /**
      * check the whereExpression to see if the columnExpression is constant.
-     * eg. for "where a =3 and b > 9", a is constant,but b is not.
+     * eg. for {@code "where a = 3 and b > 9" }, a is constant,but b is not.
      * @param columnExpression
      * @param whereExpression
      * @return
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
index b4c5e35..78a393b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
@@ -231,8 +231,8 @@ public class MetaDataUtil {
      * Encode HBase and Phoenix version along with some server-side config information such as whether WAL codec is
      * installed (necessary for non transactional, mutable secondar indexing), and whether systemNamespace mapping is enabled.
      * 
-     * @param env
-     *            RegionCoprocessorEnvironment to access HBase version and Configuration.
+     * @param hbaseVersionStr
+     * @param config
      * @return long value sent back during initialization of a cluster connection.
      */
     public static long encodeVersion(String hbaseVersionStr, Configuration config) {
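
As a rough mental model for what a method like this returns, version components and feature flags can be packed into a single long with shifts and masks. The sketch below is hypothetical: it does not reproduce the actual bit layout used by MetaDataUtil.encodeVersion(), only the general technique.

    public class VersionPackingExample {
        // Hypothetical layout: major in bits 24-31, minor in 16-23, patch in 8-15,
        // lowest bit as a feature flag (e.g. WAL codec installed).
        static long pack(int major, int minor, int patch, boolean walCodecInstalled) {
            long encoded = ((long) major << 24) | ((long) minor << 16) | ((long) patch << 8);
            return walCodecInstalled ? (encoded | 1L) : encoded;
        }

        public static void main(String[] args) {
            System.out.println(Long.toHexString(pack(5, 1, 0, true))); // prints 5010001
        }
    }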
@@ -801,10 +801,9 @@ public class MetaDataUtil {
     
     /**
      * This function checks if all regions of a table is online
+     * @param conf
      * @param table
      * @return true when all regions of a table are online
-     * @throws IOException
-     * @throws
      */
     public static boolean tableRegionsOnline(Configuration conf, PTable table) {
         try (ClusterConnection hcon =
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixContextExecutor.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixContextExecutor.java
index cd12261..34c9828 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixContextExecutor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixContextExecutor.java
@@ -24,7 +24,7 @@ import org.apache.phoenix.call.CallWrapper;
 /**
 * Executes {@code Callable}s using a context classloader that is set up to load classes from
 * Phoenix.
- * <p/>
+ * 
 * Loading HBase configuration settings and endpoint coprocessor classes is done via the context
 * classloader of the calling thread. When Phoenix is being run via a JDBC-enabled GUI, the
 * driver is often loaded dynamically and executed via multiple threads, which makes it difficult
@@ -62,7 +62,7 @@ public class PhoenixContextExecutor {
      * instead of the currently-set context classloader of the current thread. This allows loading
      * dynamically-loaded classes and configuration files using the same classloader used to
      * load the rest of the JDBC driver.
-     * <p/>
+     * 
      * The context classloader of the current thread is reset to its original value after the
      * callable has been executed.
      *
@@ -84,7 +84,6 @@ public class PhoenixContextExecutor {
      *
      * @param target the callable to be executed
      * @return the return value from the callable
-     * @throws Exception any exception thrown by the underlying callable
      */
     public static <T> T callWithoutPropagation(Callable<T> target) {
         try {
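
The pattern this class documents (install a known classloader, run the callable, restore the original) is a generic Java idiom; a minimal sketch of that idiom, not the actual PhoenixContextExecutor source:

    import java.util.concurrent.Callable;

    public final class ContextClassLoaderExample {
        // Runs the callable with the given thread context classloader and restores
        // the previous loader afterwards, even if the callable throws.
        static <T> T callWith(ClassLoader loader, Callable<T> target) throws Exception {
            Thread current = Thread.currentThread();
            ClassLoader original = current.getContextClassLoader();
            current.setContextClassLoader(loader);
            try {
                return target.call();
            } finally {
                current.setContextClassLoader(original);
            }
        }

        public static void main(String[] args) throws Exception {
            Object seen = callWith(ContextClassLoaderExample.class.getClassLoader(),
                    () -> Thread.currentThread().getContextClassLoader());
            System.out.println(seen);
        }
    }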
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java
index 1c7519c..c157873 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java
@@ -188,7 +188,7 @@ public class PhoenixKeyValueUtil {
 
     /**
      * Estimates the storage size of a row
-     * @param mutations map from table to row to RowMutationState
+     * @param tableMutationMap map from table to row to RowMutationState
      * @return estimated row size
      */
     public static long getEstimatedRowMutationSize(
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
index e844158..8c68357 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
@@ -1092,7 +1092,7 @@ public class PhoenixRuntime {
      * of their values in the object array.
      * @return values encoded in a byte array 
      * @throws SQLException
-     * @see {@link #decodeValues(Connection, String, byte[], List)}
+     * @see decodeValues(Connection, String, byte[], List)
      */
     @Deprecated
     public static byte[] encodeValues(Connection conn, String fullTableName, Object[] values, List<Pair<String, String>> columns) throws SQLException {
@@ -1158,7 +1158,7 @@ public class PhoenixRuntime {
      * of their values in the object array.
      * @return values encoded in a byte array 
      * @throws SQLException
-     * @see {@link #decodeValues(Connection, String, byte[], List)}
+     * @see decodeValues(Connection, String, byte[], List)
      */
     public static byte[] encodeColumnValues(Connection conn, String fullTableName, Object[] values, List<Pair<String, String>> columns) throws SQLException {
         PTable table = getTable(conn, fullTableName);
@@ -1409,11 +1409,12 @@ public class PhoenixRuntime {
      *    requestReadMetrics = PhoenixRuntime.getRequestReadMetrics(rs);
      *    PhoenixRuntime.resetMetrics(rs);
      * }
+     * }
      * </pre>
      * 
      * @param rs
      *            result set to get the metrics for
-     * @return a map of (table name) -> (map of (metric name) -> (metric value))
+     * @return a map of {@code (table name) -> (map of (metric name) -> (metric value)) }
      * @throws SQLException
      */
     public static Map<String, Map<MetricType, Long>> getRequestReadMetricInfo(ResultSet rs) throws SQLException {
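
A hedged usage sketch for the nested map shape documented here (table name -> metric -> value). Only getRequestReadMetricInfo and resetMetrics are taken from this diff; the query, the table name and the org.apache.phoenix.monitoring.MetricType import are assumptions made for illustration:

    import java.sql.Connection;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;
    import java.util.Map;
    import org.apache.phoenix.monitoring.MetricType;
    import org.apache.phoenix.util.PhoenixRuntime;

    public class ReadMetricsExample {
        // Prints per-table read metrics for an illustrative query, then resets them.
        static void printReadMetrics(Connection conn) throws SQLException {
            try (Statement stmt = conn.createStatement();
                 ResultSet rs = stmt.executeQuery("SELECT * FROM MY_TABLE")) {
                while (rs.next()) { /* consume rows so metrics are populated */ }
                Map<String, Map<MetricType, Long>> metrics = PhoenixRuntime.getRequestReadMetricInfo(rs);
                for (Map.Entry<String, Map<MetricType, Long>> perTable : metrics.entrySet()) {
                    for (Map.Entry<MetricType, Long> m : perTable.getValue().entrySet()) {
                        System.out.println(perTable.getKey() + " " + m.getKey() + " = " + m.getValue());
                    }
                }
                PhoenixRuntime.resetMetrics(rs);
            }
        }
    }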
@@ -1443,11 +1444,12 @@ public class PhoenixRuntime {
      *    requestReadMetrics = PhoenixRuntime.getRequestReadMetrics(rs);
      *    PhoenixRuntime.resetMetrics(rs);
      * }
+     * }
      * </pre>
      * 
      * @param rs
      *            result set to get the metrics for
-     * @return a map of metric name -> metric value
+     * @return a map of {@code  metric name -> metric value }
      * @throws SQLException
      */
     public static Map<MetricType, Long> getOverAllReadRequestMetricInfo(ResultSet rs) throws SQLException {
@@ -1482,11 +1484,12 @@ public class PhoenixRuntime {
      *    mutationReadMetrics = PhoenixRuntime.getReadMetricsForMutationsSinceLastReset(conn);
      *    PhoenixRuntime.resetMetrics(rs);
      * }
+     * }
      * </pre>
      *  
      * @param conn
      *            connection to get the metrics for
-     * @return a map of (table name) -> (map of (metric name) -> (metric value))
+     * @return a map of {@code  (table name) -> (map of (metric name) -> (metric value)) }
      * @throws SQLException
      */
     public static Map<String, Map<MetricType, Long>> getWriteMetricInfoForMutationsSinceLastReset(Connection conn) throws SQLException {
@@ -1521,10 +1524,11 @@ public class PhoenixRuntime {
      *    mutationReadMetrics = PhoenixRuntime.getReadMetricsForMutationsSinceLastReset(conn);
      *    PhoenixRuntime.resetMetrics(rs);
      * }
+     * }
      * </pre> 
      * @param conn
      *            connection to get the metrics for
-     * @return  a map of (table name) -> (map of (metric name) -> (metric value))
+     * @return  a map of {@code  (table name) -> (map of (metric name) -> (metric value)) }
      * @throws SQLException
      */
     public static Map<String, Map<MetricType, Long>> getReadMetricInfoForMutationsSinceLastReset(Connection conn) throws SQLException {
@@ -1541,7 +1545,7 @@ public class PhoenixRuntime {
     /**
      * Reset the read metrics collected in the result set.
      * 
-     * @see {@link #getRequestReadMetrics(ResultSet)} {@link #getOverAllReadRequestMetrics(ResultSet)}
+     * @see  #getRequestReadMetrics(ResultSet) #getOverAllReadRequestMetrics(ResultSet)
      * @param rs
      * @throws SQLException
      */
@@ -1553,7 +1557,7 @@ public class PhoenixRuntime {
     /**
      * Reset the mutation and reads-for-mutations metrics collected in the connection.
      * 
-     * @see {@link #getReadMetricsForMutationsSinceLastReset(Connection)} {@link #getWriteMetricsForMutationsSinceLastReset(Connection)}
+     * @see #getReadMetricsForMutationsSinceLastReset(Connection) #getWriteMetricsForMutationsSinceLastReset(Connection)
      * @param conn
      * @throws SQLException
      */
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
index 5b55a93..a817581 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
@@ -285,9 +285,9 @@ public class ReadOnlyProps implements Iterable<Entry<String, String>> {
     }
 
     /**
-     * Get the properties as a <code>Map<String,String></code>
+     * Get the properties as a {@code Map<String,String>}
      * 
-     * @return Map<String,String> 
+     * @return {@code Map<String,String>}
      */
     public Map<String,String> asMap() {
         return props;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/StringUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/StringUtil.java
index 6799c46..c642f36 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/StringUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/StringUtil.java
@@ -340,7 +340,7 @@ public class StringUtil {
      *            the values to be stored in all elements of the array
      * @param fillFromIdx
      *            the index of the first element (inclusive) to be used as fill values
-     * @param filToIdx
+     * @param fillToIdx
      *            the index of the last element (exclusive) to be used as fill value
      * @param invertFill
      *            if true inverts the bits in fill before filling the array
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ViewUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ViewUtil.java
index 4d99b8b..24b17a7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ViewUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ViewUtil.java
@@ -93,7 +93,7 @@ public class ViewUtil {
 
     /**
      * Find all the descendant views of a given table or view in a depth-first fashion.
-     * Note that apart from scanning the parent->child links, we also validate each view
+     * Note that apart from scanning the {@code parent->child } links, we also validate each view
      * by trying to resolve it.
      * Use {@link ViewUtil#findAllRelatives(Table, byte[], byte[], byte[], LinkType,
      * TableViewFinderResult)} if you want to find other links and don't care about orphan results.
@@ -298,7 +298,7 @@ public class ViewUtil {
     
     /**
      * Check metadata to find if a given table/view has any immediate child views. Note that this
-     * is not resilient to orphan parent->child links.
+     * is not resilient to orphan {@code parent->child } links.
      * @param sysCatOrsysChildLink For older (pre-4.15.0) clients, we look for child links inside
      *                             SYSTEM.CATALOG, otherwise we look for them inside
      *                             SYSTEM.CHILD_LINK
@@ -335,14 +335,15 @@ public class ViewUtil {
     }
 
     /**
-     * Attempt to drop an orphan child view i.e. a child view for which we see a parent->child entry
+     * Attempt to drop an orphan child view i.e. a child view for which we see a
+     * {@code parent->child } entry
      * in SYSTEM.CHILD_LINK/SYSTEM.CATALOG (as a child) but for whom the parent no longer exists.
      * @param env Region Coprocessor environment
      * @param tenantIdBytes tenantId of the parent
      * @param schemaName schema of the parent
      * @param tableOrViewName parent table/view name
      * @param sysCatOrSysChildLink SYSTEM.CATALOG or SYSTEM.CHILD_LINK which is used to find the
-     *                             parent->child linking rows
+     *                             {@code parent->child } linking rows
      * @throws IOException thrown if there is an error scanning SYSTEM.CHILD_LINK or SYSTEM.CATALOG
      * @throws SQLException thrown if there is an error getting a connection to the server or
      * an error retrieving the PTable for a child view
@@ -435,8 +436,8 @@ public class ViewUtil {
 
 
     /**
-     * Determines whether we should use SYSTEM.CATALOG or SYSTEM.CHILD_LINK to find parent->child
-     * links i.e. {@link LinkType#CHILD_TABLE}.
+     * Determines whether we should use SYSTEM.CATALOG or SYSTEM.CHILD_LINK to find
+     * {@code parent->child } links i.e. {@link LinkType#CHILD_TABLE}.
      * If the client is older than 4.15.0 and the SYSTEM.CHILD_LINK table does not exist, we use
      * the SYSTEM.CATALOG table. In all other cases, we use the SYSTEM.CHILD_LINK table.
      * This is required for backwards compatibility.
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/MonitorManager.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/MonitorManager.java
index 5c434d8..5800a20 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/MonitorManager.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/MonitorManager.java
@@ -172,8 +172,8 @@ public class MonitorManager implements Workload {
     /**
      * This method should really only be used for testing
      *
-     * @return List < {@link org.apache.phoenix.pherf.result.Result} >
-     * @throws IOException
+     * @return {@code List < org.apache.phoenix.pherf.result.Result > }
+     * @throws Exception
      */
     public synchronized List<Result> readResults() throws Exception {
         ResultHandler handler = null;
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/Result.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/Result.java
index 158ed11..93225d7 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/Result.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/Result.java
@@ -36,7 +36,7 @@ public class Result {
      * @param type          {@link org.apache.phoenix.pherf.result.file.ResultFileDetails} Currently unused, but gives metadata about the
      *                      contents of the result.
      * @param header        Used for CSV, otherwise pass null. For CSV pass comma separated string of header fields.
-     * @param messageValues List<{@link ResultValue} All fields combined represent the data
+     * @param messageValues {@code List<ResultValue> } All fields combined represent the data
      *                      for a row to be written.
      */
     public Result(ResultFileDetails type, String header, List<ResultValue> messageValues) {
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java
index 1cf740e..91db782 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java
@@ -121,7 +121,7 @@ public class ResultManager {
     /**
      * Write a combined set of results for each result in the list.
      *
-     * @param dataModelResults List<{@link DataModelResult > </>}
+     * @param dataModelResults {@code List<DataModelResult > }
      * @throws Exception
      */
     public synchronized void write(List<DataModelResult> dataModelResults, RulesApplier rulesApplier) throws Exception {
@@ -145,7 +145,6 @@ public class ResultManager {
 
     /**
      * Allows for flushing all the {@link org.apache.phoenix.pherf.result.ResultHandler}
-     * @throws Exception
      */
     public synchronized void flush(){
         for (ResultHandler handler : resultHandlers) {
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RuleBasedDataGenerator.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RuleBasedDataGenerator.java
index 24ecd20..d68e468 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RuleBasedDataGenerator.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RuleBasedDataGenerator.java
@@ -24,7 +24,7 @@ public interface RuleBasedDataGenerator {
      * Get data value based on the rules
      * Implementations should be thread safe as multiple theads will call it in parallel
      *
-     * @return {@link org.apache.phoenix.pherf.rules.DataValue} Container Type --> Value mapping
+     * @return {@link org.apache.phoenix.pherf.rules.DataValue} {@code Container Type --> Value } mapping
      */
     DataValue getDataValue();
 }
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java
index 321304f..92dd362 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java
@@ -161,7 +161,7 @@ public class RulesApplier {
      * Get data value based on the supplied rule
      *
      * @param column {@link org.apache.phoenix.pherf.configuration.Column} Column rule to get data for
-     * @return {@link org.apache.phoenix.pherf.rules.DataValue} Container Type --> Value mapping
+     * @return {@link org.apache.phoenix.pherf.rules.DataValue} {@code Container Type --> Value mapping }
      */
     public DataValue getDataValue(Column column) throws Exception{
         DataValue data = null;
