Repository: hive
Updated Branches:
  refs/heads/master 8dfa8a5ae -> 8d2b4e905


HIVE-16715: Clean up javadoc from errors in modules llap-client, metastore, 
spark-client (Janos Gub via Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich <k...@rxd.hu>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8d2b4e90
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8d2b4e90
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8d2b4e90

Branch: refs/heads/master
Commit: 8d2b4e905c9d802ac0fc34c5a2c7344bffa5d451
Parents: 8dfa8a5
Author: Janos Gub <gubja...@gmail.com>
Authored: Fri Jun 16 14:38:47 2017 +0200
Committer: Zoltan Haindrich <k...@rxd.hu>
Committed: Fri Jun 16 14:38:47 2017 +0200

----------------------------------------------------------------------
 .../llap/ext/LlapTaskUmbilicalExternalClient.java     |  1 -
 .../hadoop/hive/llap/registry/ServiceInstance.java    |  2 +-
 .../apache/hadoop/hive/metastore/DatabaseProduct.java |  2 +-
 .../org/apache/hadoop/hive/metastore/Deadline.java    |  4 ++--
 .../hadoop/hive/metastore/HiveMetaStoreClient.java    |  8 ++++----
 .../hadoop/hive/metastore/IMetaStoreClient.java       | 14 +++++++-------
 .../hadoop/hive/metastore/IMetaStoreSchemaInfo.java   |  2 +-
 .../hadoop/hive/metastore/MetaStoreFilterHook.java    |  5 ++---
 .../apache/hadoop/hive/metastore/MetaStoreThread.java |  2 +-
 .../apache/hadoop/hive/metastore/MetaStoreUtils.java  |  5 ++---
 .../hadoop/hive/metastore/PartFilterExprUtil.java     |  2 +-
 .../org/apache/hadoop/hive/metastore/RawStore.java    |  2 --
 .../hadoop/hive/metastore/events/InsertEvent.java     |  2 +-
 .../hadoop/hive/metastore/hbase/HBaseReadWrite.java   |  1 -
 .../hadoop/hive/metastore/hbase/MetadataStore.java    |  2 +-
 .../event/filters/DatabaseAndTableFilter.java         |  2 +-
 .../hadoop/hive/metastore/parser/ExpressionTree.java  |  4 +---
 .../hadoop/hive/metastore/tools/HiveSchemaHelper.java |  2 +-
 .../apache/hadoop/hive/metastore/txn/TxnStore.java    |  2 +-
 .../apache/hadoop/hive/metastore/txn/TxnUtils.java    |  2 +-
 .../hive/metastore/model/MStorageDescriptor.java      |  2 +-
 .../org/apache/hive/spark/client/SparkClient.java     |  9 +++++----
 .../apache/hive/spark/client/rpc/RpcDispatcher.java   |  7 ++++---
 23 files changed, 39 insertions(+), 45 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
----------------------------------------------------------------------
diff --git 
a/llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
 
b/llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
index c7de417..406bdda 100644
--- 
a/llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
+++ 
b/llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
@@ -196,7 +196,6 @@ public class LlapTaskUmbilicalExternalClient extends 
AbstractService implements
 
   /**
    * Submit the work for actual execution.
-   * @throws InvalidProtocolBufferException 
    */
   public void submitWork(SubmitWorkRequestProto request, String llapHost, int 
llapPort) {
     // Register the pending events to be sent for this spec.

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstance.java
----------------------------------------------------------------------
diff --git 
a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstance.java
 
b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstance.java
index 081995c..70515c4 100644
--- 
a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstance.java
+++ 
b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstance.java
@@ -20,7 +20,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 public interface ServiceInstance {
 
   /**
-   * Worker identity is a UUID (unique across restarts), to identify a node 
which died & was brought
+   * Worker identity is a UUID (unique across restarts), to identify a node 
which died &amp; was brought
    * back on the same host/port
    */
   public String getWorkerIdentity();

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/metastore/src/java/org/apache/hadoop/hive/metastore/DatabaseProduct.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/DatabaseProduct.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/DatabaseProduct.java
index 33abbb2..7634852 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/DatabaseProduct.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/DatabaseProduct.java
@@ -27,7 +27,7 @@ public enum DatabaseProduct {
 
   /**
    * Determine the database product type
-   * @param conn database connection
+   * @param productName product name string used to determine the database product
    * @return database product type
    */
   public static DatabaseProduct determineDatabaseProduct(String productName) 
throws SQLException {

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/metastore/src/java/org/apache/hadoop/hive/metastore/Deadline.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/Deadline.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/Deadline.java
index 6149224..99bd7b0 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/Deadline.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/Deadline.java
@@ -82,7 +82,7 @@ public class Deadline {
 
   /**
    * reset the timeout value of this timer.
-   * @param timeout
+   * @param timeoutMs
    */
   public static void resetTimeout(long timeoutMs) throws MetaException {
     if (timeoutMs <= 0) {
@@ -139,7 +139,7 @@ public class Deadline {
 
   /**
    * Check whether the long running method timeout.
-   * @throws DeadlineException when the method timeout
+   * @throws MetaException when the method timeout
    */
   public static void checkTimeout() throws MetaException {
     Deadline deadline = getCurrentDeadline();

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 2a3dcc4..b99f40a 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -663,7 +663,7 @@ public class HiveMetaStoreClient implements 
IMetaStoreClient {
    * @param partitionSpecs partitions specs of the parent partition to be 
exchanged
    * @param destDb the db of the destination table
    * @param destinationTableName the destination table name
-   @ @return new partition after exchanging
+   * @return new partition after exchanging
    */
   @Override
   public Partition exchange_partition(Map<String, String> partitionSpecs,
@@ -679,7 +679,7 @@ public class HiveMetaStoreClient implements 
IMetaStoreClient {
    * @param partitionSpecs partitions specs of the parent partition to be 
exchanged
    * @param destDb the db of the destination table
    * @param destinationTableName the destination table name
-   @ @return new partitions after exchanging
+   * @return new partitions after exchanging
    */
   @Override
   public List<Partition> exchange_partitions(Map<String, String> 
partitionSpecs,
@@ -1244,7 +1244,7 @@ public class HiveMetaStoreClient implements 
IMetaStoreClient {
    * @param db_name the database name
    * @param tbl_name the table name
    * @param filter the filter string,
-   *    for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering 
can
+   *    for example "part1 = \"p1_abc\" and part2 &lt;= "\p2_test\"". 
Filtering can
    *    be done only on string partition keys.
    * @param max_parts the maximum number of partitions to return,
    *    all partitions are returned if -1 is passed
@@ -1517,7 +1517,7 @@ public class HiveMetaStoreClient implements 
IMetaStoreClient {
    * @param db_name the database name
    * @param tbl_name the table name
    * @param filter the filter string,
-   *    for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering 
can
+   *    for example "part1 = \"p1_abc\" and part2 &lt;= "\p2_test\"". 
Filtering can
    *    be done only on string partition keys.
    * @return number of partitions
    * @throws MetaException

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 64a71df..d4bbef0 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -211,7 +211,7 @@ public interface IMetaStoreClient {
 
   /**
    * Get a list of table names that match a filter.
-   * The filter operators are LIKE, <, <=, >, >=, =, <>
+   * The filter operators are LIKE, &lt;, &lt;=, &gt;, &gt;=, =, &lt;&gt;
    *
    * In the filter statement, values interpreted as strings must be enclosed 
in quotes,
    * while values interpreted as integers should not be.  Strings and integers 
are the only
@@ -223,12 +223,12 @@ public interface IMetaStoreClient {
    * Constants.HIVE_FILTER_FIELD_LAST_ACCESS, which filters on the last access 
times
    *   and supports all filter operators except LIKE
    * Constants.HIVE_FILTER_FIELD_PARAMS, which filters on the tables' 
parameter keys and values
-   *   and only supports the filter operators = and <>.
+   *   and only supports the filter operators = and &lt;&gt;.
    *   Append the parameter key name to HIVE_FILTER_FIELD_PARAMS in the filter 
statement.
    *   For example, to filter on parameter keys called "retention", the key 
name in the filter
    *   statement should be Constants.HIVE_FILTER_FIELD_PARAMS + "retention"
-   *   Also, = and <> only work for keys that exist in the tables.
-   *   E.g., filtering on tables where key1 <> value will only
+   *   Also, = and &lt;&gt; only work for keys that exist in the tables.
+   *   E.g., filtering on tables where key1 &lt;&gt; value will only
    *   return tables that have a value for the parameter key1.
    * Some example filter statements include:
    * filter = Constants.HIVE_FILTER_FIELD_OWNER + " like \".*test.*\" and " +
@@ -589,7 +589,7 @@ public interface IMetaStoreClient {
    * @param dbName the database name
    * @param tableName the table name
    * @param filter the filter string,
-   *    for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering 
can
+   *    for example "part1 = \"p1_abc\" and part2 &lt;= "\p2_test\"". 
Filtering can
    *    be done only on string partition keys.
    * @return number of partitions
    * @throws MetaException
@@ -605,7 +605,7 @@ public interface IMetaStoreClient {
    * @param db_name the database name
    * @param tbl_name the table name
    * @param filter the filter string,
-   *    for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering 
can
+   *    for example "part1 = \"p1_abc\" and part2 &lt;= "\p2_test\"". 
Filtering can
    *    be done only on string partition keys.
    * @param max_parts the maximum number of partitions to return,
    *    all partitions are returned if -1 is passed
@@ -1563,7 +1563,7 @@ public interface IMetaStoreClient {
    * Get the next set of notifications from the database.
    * @param lastEventId The last event id that was consumed by this reader.  
The returned
    *                    notifications will start at the next eventId available 
after this eventId.
-   * @param maxEvents Maximum number of events to return.  If < 1, then all 
available events will
+   * @param maxEvents Maximum number of events to return.  If &lt; 1, then all 
available events will
    *                  be returned.
    * @param filter User provided filter to remove unwanted events.  If null, 
all events will be
    *               returned.

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreSchemaInfo.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreSchemaInfo.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreSchemaInfo.java
index 296d04b..0a5f978 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreSchemaInfo.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreSchemaInfo.java
@@ -99,7 +99,7 @@ public interface IMetaStoreSchemaInfo {
    * 'rolling downgrade' is happening. This is a state where hive is 
functional and returning non
    * zero status for it is misleading.
    *
-   * @param hiveVersion version of hive software
+   * @param productVersion version of hive software
    * @param dbVersion version of metastore rdbms schema
    * @return true if versions are compatible
    */

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFilterHook.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFilterHook.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFilterHook.java
index 933ae2d..2263837 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFilterHook.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFilterHook.java
@@ -58,7 +58,7 @@ public interface MetaStoreFilterHook {
    * Filter given list of tables
    * @param dbName
    * @param tableList
-   * @returnList of filtered table names
+   * @return List of filtered table names
    */
   public List<String> filterTableNames(String dbName, List<String> tableList) 
throws MetaException;
 
@@ -72,9 +72,8 @@ public interface MetaStoreFilterHook {
 
   /**
    * Filter given list of tables
-   * @param dbName
    * @param tableList
-   * @returnList of filtered table names
+   * @return List of filtered table names
    */
   public List<Table> filterTables(List<Table> tableList) throws MetaException;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java
index a0c8d3b..b62c45f 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java
@@ -55,7 +55,7 @@ public interface MetaStoreThread {
 
   /**
    * Run the thread in the background.  This must not be called until
-   * {@link ##init(java.util.concurrent.atomic.AtomicBoolean, 
java.util.concurrent.atomic.AtomicBoolean)} has
+   * {@link 
MetaStoreThread#init(java.util.concurrent.atomic.AtomicBoolean,java.util.concurrent.atomic.AtomicBoolean)}
 has
    * been called.
    */
   void start();

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index ff8412c..1aaba4c 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@ -576,7 +576,6 @@ public class MetaStoreUtils {
    * @param conf
    *          hive configuration
    * @return true or false depending on conformance
-   * @exception MetaException
    *              if it doesn't match the pattern.
    */
   static public boolean validateName(String name, Configuration conf) {
@@ -700,7 +699,7 @@ public class MetaStoreUtils {
    * validate column type
    *
    * if it is predefined, yes. otherwise no
-   * @param name
+   * @param type
    * @return
    */
   static public String validateColumnType(String type) {
@@ -858,7 +857,7 @@ public class MetaStoreUtils {
    * @return String containing "Thrift
    *         DDL#comma-separated-column-names#colon-separated-columntypes
    *         Example:
-   *         "struct result { a string, map<int,string> 
b}#a,b#string:map<int,string>"
+   *         "struct result { a string, map&lt;int,string&gt; 
b}#a,b#string:map&lt;int,string&gt;"
    */
   public static String getFullDDLFromFieldSchema(String structName,
       List<FieldSchema> fieldSchemas) {

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/metastore/src/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java
index e38e8dd..41d7e81 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java
@@ -62,7 +62,7 @@ public class PartFilterExprUtil {
 
   /**
    * Creates the proxy used to evaluate expressions. This is here to prevent 
circular
-   * dependency - ql -&gt; metastore client &lt;-&gt metastore server -&gt ql. 
If server and
+   * dependency - ql -&gt; metastore client &lt;-&gt; metastore server -&gt; 
ql. If server and
    * client are split, this can be removed.
    * @param conf Configuration.
    * @return The partition expression proxy.

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
index 67506f2..8f6af9f 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -407,7 +407,6 @@ public interface RawStore extends Configurable {
    * @return Relevant column statistics for the column for the given table
    * @throws NoSuchObjectException
    * @throws MetaException
-   * @throws InvalidInputException
    *
    */
   public abstract ColumnStatistics getTableColumnStatistics(String dbName, 
String tableName,
@@ -543,7 +542,6 @@ public interface RawStore extends Configurable {
    * Drop a function definition.
    * @param dbName
    * @param funcName
-   * @return
    * @throws MetaException
    * @throws NoSuchObjectException
    * @throws InvalidObjectException

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/metastore/src/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java
index c33ade1..56607d3 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java
@@ -43,7 +43,7 @@ public class InsertEvent extends ListenerEvent {
    * @param db name of the database the table is in
    * @param table name of the table being inserted into
    * @param partVals list of partition values, can be null
-   * @param insertData the inserted files & their checksums
+   * @param insertData the inserted files and their checksums
    * @param status status of insert, true = success, false = failure
    * @param handler handler that is firing the event
    */

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
index d711805..ab6457e 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
@@ -2297,7 +2297,6 @@ public class HBaseReadWrite implements MetadataStore {
 
   /**
    * @param fileIds file ID list.
-   * @return Serialized file metadata.
    */
   @Override
   public void getFileMetadata(List<Long> fileIds, ByteBuffer[] result) throws 
IOException {

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/MetadataStore.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/MetadataStore.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/MetadataStore.java
index 0382e8a..d427fef 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/MetadataStore.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/MetadataStore.java
@@ -41,7 +41,7 @@ public interface MetadataStore {
 
   /**
    * @param fileId The file ID.
-   * @param metadataBuffers Serialized file metadata.
+   * @param metadata Serialized file metadata.
    * @param addedCols The column names for additional columns created by 
file-format-specific
    *                  metadata handler, to be stored in the cache.
    * @param addedVals The values for addedCols; one value per added column.

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java
 
b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java
index 4a7ca6d..490d3b4 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.hive.metastore.api.NotificationEvent;
  * Utility function that constructs a notification filter to match a given db 
name and/or table name.
  * If dbName == null, fetches all warehouse events.
  * If dnName != null, but tableName == null, fetches all events for the db
- * If dbName != null && tableName != null, fetches all events for the 
specified table
+ * If dbName != null &amp;&amp; tableName != null, fetches all events for the 
specified table
  */
 public class DatabaseAndTableFilter extends BasicFilter {
   private final String databaseName, tableName;

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
 
b/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
index 10fcbea..8b12899 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
@@ -230,8 +230,7 @@ public class ExpressionTree {
      *        tables that match the filter.
      * @param params
      *        A map of parameter key to values for the filter statement.
-     * @param filterBuilder The filter builder that is used to build filter.
-     * @return a JDO filter statement
+     * @param filterBuffer The filter builder that is used to build filter.
      * @throws MetaException
      */
     public void generateJDOFilter(Configuration conf, Table table,
@@ -385,7 +384,6 @@ public class ExpressionTree {
     }
 
     /**
-     * @param operator operator
      * @return true iff filter pushdown for this operator can be done for 
integral types.
      */
     public boolean canJdoUseStringsWithIntegral() {

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
 
b/metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
index 0127bdd..620ea5f 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
@@ -48,7 +48,7 @@ public class HiveSchemaHelper {
    * @param printInfo print connection parameters
    * @param hiveConf hive config object
    * @return metastore connection object
-   * @throws org.apache.hadoop.hive.metastore.api.MetaException
+   * @throws org.apache.hadoop.hive.metastore.HiveMetaException
    */
   public static Connection getConnectionToMetastore(String userName,
       String password, String url, String driver, boolean printInfo,

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
index 0b0df85..3eb3827 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
@@ -424,7 +424,7 @@ public interface TxnStore {
   }
 
   /**
-   * Once a {@link java.util.concurrent.ThreadPoolExecutor.Worker} submits a 
job to the cluster,
+   * Once a {@link java.util.concurrent.ThreadPoolExecutor} Worker submits a 
job to the cluster,
    * it calls this to update the metadata.
    * @param id {@link CompactionInfo#id}
    */

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
index b2ced6b..e6c62d3 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
@@ -75,7 +75,7 @@ public class TxnUtils {
    * Transform a {@link 
org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse} to a
    * {@link org.apache.hadoop.hive.common.ValidTxnList}.  This assumes that 
the caller intends to
    * compact the files, and thus treats only open transactions as invalid.  
Additionally any
-   * txnId > highestOpenTxnId is also invalid.  This is to avoid creating 
something like
+   * txnId &gt; highestOpenTxnId is also invalid.  This is to avoid creating 
something like
    * delta_17_120 where txnId 80, for example, is still open.
    * @param txns txn list from the metastore
    * @return a valid txn list.

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/metastore/src/model/org/apache/hadoop/hive/metastore/model/MStorageDescriptor.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/model/org/apache/hadoop/hive/metastore/model/MStorageDescriptor.java
 
b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MStorageDescriptor.java
index 9da3071..2e021af 100644
--- 
a/metastore/src/model/org/apache/hadoop/hive/metastore/model/MStorageDescriptor.java
+++ 
b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MStorageDescriptor.java
@@ -252,7 +252,7 @@ public class MStorageDescriptor {
   }
 
   /**
-   * @param skewedColValueLocationMaps the skewedColValueLocationMaps to set
+   * @param listBucketColValuesMapping the skewedColValueLocationMaps to set
    */
   public void setSkewedColValueLocationMaps(Map<MStringList, String> 
listBucketColValuesMapping) {
     this.skewedColValueLocationMaps = listBucketColValuesMapping;

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/spark-client/src/main/java/org/apache/hive/spark/client/SparkClient.java
----------------------------------------------------------------------
diff --git 
a/spark-client/src/main/java/org/apache/hive/spark/client/SparkClient.java 
b/spark-client/src/main/java/org/apache/hive/spark/client/SparkClient.java
index e952f27..1922e41 100644
--- a/spark-client/src/main/java/org/apache/hive/spark/client/SparkClient.java
+++ b/spark-client/src/main/java/org/apache/hive/spark/client/SparkClient.java
@@ -49,15 +49,16 @@ public interface SparkClient extends Serializable {
 
   /**
    * Asks the remote context to run a job immediately.
-   * <p/>
+   * <p>
    * Normally, the remote context will queue jobs and execute them based on 
how many worker
    * threads have been configured. This method will run the submitted job in 
the same thread
    * processing the RPC message, so that queueing does not apply.
-   * <p/>
+   * </p>
+   * <p>
    * It's recommended that this method only be used to run code that finishes 
quickly. This
    * avoids interfering with the normal operation of the context.
-   * <p/>
-   * Note: the {@link JobContext#monitor()} functionality is not available 
when using this method.
+   * </p>
+   * Note: the JobContext#monitor() functionality is not available when using 
this method.
    *
    * @param job The job to execute.
    * @return A future to monitor the result of the job.

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcDispatcher.java
----------------------------------------------------------------------
diff --git 
a/spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcDispatcher.java
 
b/spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcDispatcher.java
index 2b6ab29..00f5a17 100644
--- 
a/spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcDispatcher.java
+++ 
b/spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcDispatcher.java
@@ -37,14 +37,15 @@ import 
org.apache.hadoop.hive.common.classification.InterfaceAudience;
 /**
  * An implementation of ChannelInboundHandler that dispatches incoming 
messages to an instance
  * method based on the method signature.
- * <p/>
+ * <p>
  * A handler's signature must be of the form:
- * <p/>
+ * </p>
  * <blockquote><tt>protected void handle(ChannelHandlerContext, 
MessageType)</tt></blockquote>
- * <p/>
+ * <p>
  * Where "MessageType" must match exactly the type of the message to handle. 
Polymorphism is not
  * supported. Handlers can return a value, which becomes the RPC reply; if a 
null is returned, then
  * a reply is still sent, with an empty payload.
+ * </p>
  */
 @InterfaceAudience.Private
 public abstract class RpcDispatcher extends 
SimpleChannelInboundHandler<Object> {

Reply via email to