Repository: spark
Updated Branches:
  refs/heads/master 287781742 -> 5536f3181


[MINOR][BUILD] Fix Java linter errors

## What changes were proposed in this pull request?

This PR cleans up a few Java linter errors for the Apache Spark 2.3 release.
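
The fixes fall into a few recurring Checkstyle categories visible in the diff below: lines over the 100-character limit, `final static` instead of the canonical `static final` modifier order, unused imports, and over-long method signatures wrapped to one parameter per line. A minimal sketch of the first two patterns, using hypothetical names rather than code from this commit:

```java
// Illustrative sketch only; the class and member names are made up.
public class LintFixExamples {

  // ModifierOrder: "private final static" is flagged because the modifiers
  // are out of the JLS-recommended order; "static final" is canonical.
  private static final int MAX_LINE_LENGTH = 100;

  // LineLength: an over-long string concatenation is wrapped so that no
  // physical line exceeds the limit.
  String oomMessage(long required, long got) {
    return "Unable to acquire " + required + " bytes of memory, got " +
      got;
  }
}
```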

## How was this patch tested?

```bash
$ dev/lint-java
Using `mvn` from path: /usr/local/bin/mvn
Checkstyle checks passed.
```

We can see the result from [Travis CI](https://travis-ci.org/dongjoon-hyun/spark/builds/322470787), too.

Author: Dongjoon Hyun <dongj...@apache.org>

Closes #20101 from dongjoon-hyun/fix-java-lint.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/5536f318
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/5536f318
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/5536f318

Branch: refs/heads/master
Commit: 5536f3181c1e77c70f01d6417407d218ea48b961
Parents: 2877817
Author: Dongjoon Hyun <dongj...@apache.org>
Authored: Thu Dec 28 09:43:50 2017 -0600
Committer: Sean Owen <so...@cloudera.com>
Committed: Thu Dec 28 09:43:50 2017 -0600

----------------------------------------------------------------------
 .../java/org/apache/spark/memory/MemoryConsumer.java  |  3 ++-
 .../streaming/kinesis/KinesisInitialPositions.java    | 14 ++++++++------
 .../datasources/parquet/VectorizedColumnReader.java   |  3 +--
 .../parquet/VectorizedParquetRecordReader.java        |  4 ++--
 .../spark/sql/execution/vectorized/ColumnarRow.java   |  3 ++-
 .../spark/sql/sources/v2/SessionConfigSupport.java    |  3 ---
 .../sources/v2/streaming/ContinuousReadSupport.java   |  5 ++++-
 .../sources/v2/streaming/ContinuousWriteSupport.java  |  6 +++---
 .../spark/sql/sources/v2/streaming/reader/Offset.java |  3 ++-
 .../sources/v2/streaming/reader/PartitionOffset.java  |  1 -
 .../hive/service/cli/operation/SQLOperation.java      |  1 -
 11 files changed, 24 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/5536f318/core/src/main/java/org/apache/spark/memory/MemoryConsumer.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/spark/memory/MemoryConsumer.java b/core/src/main/java/org/apache/spark/memory/MemoryConsumer.java
index a7bd4b3..115e1fb 100644
--- a/core/src/main/java/org/apache/spark/memory/MemoryConsumer.java
+++ b/core/src/main/java/org/apache/spark/memory/MemoryConsumer.java
@@ -154,6 +154,7 @@ public abstract class MemoryConsumer {
       taskMemoryManager.freePage(page, this);
     }
     taskMemoryManager.showMemoryUsage();
-    throw new SparkOutOfMemoryError("Unable to acquire " + required + " bytes of memory, got " + got);
+    throw new SparkOutOfMemoryError("Unable to acquire " + required + " bytes of memory, got " +
+      got);
   }
 }

http://git-wip-us.apache.org/repos/asf/spark/blob/5536f318/external/kinesis-asl/src/main/java/org/apache/spark/streaming/kinesis/KinesisInitialPositions.java
----------------------------------------------------------------------
diff --git a/external/kinesis-asl/src/main/java/org/apache/spark/streaming/kinesis/KinesisInitialPositions.java b/external/kinesis-asl/src/main/java/org/apache/spark/streaming/kinesis/KinesisInitialPositions.java
index 206e1e4..b5f5ab0 100644
--- a/external/kinesis-asl/src/main/java/org/apache/spark/streaming/kinesis/KinesisInitialPositions.java
+++ b/external/kinesis-asl/src/main/java/org/apache/spark/streaming/kinesis/KinesisInitialPositions.java
@@ -67,9 +67,10 @@ public class KinesisInitialPositions {
 
 
     /**
-     * Returns instance of [[KinesisInitialPosition]] based on the passed [[InitialPositionInStream]].
-     * This method is used in KinesisUtils for translating the InitialPositionInStream
-     * to InitialPosition. This function would be removed when we deprecate the KinesisUtils.
+     * Returns instance of [[KinesisInitialPosition]] based on the passed
+     * [[InitialPositionInStream]]. This method is used in KinesisUtils for translating the
+     * InitialPositionInStream to InitialPosition. This function would be removed when we deprecate
+     * the KinesisUtils.
      *
      * @return [[InitialPosition]]
      */
@@ -83,9 +84,10 @@ public class KinesisInitialPositions {
             // InitialPositionInStream.AT_TIMESTAMP is not supported.
             // Use InitialPosition.atTimestamp(timestamp) instead.
             throw new UnsupportedOperationException(
-                    "Only InitialPositionInStream.LATEST and InitialPositionInStream.TRIM_HORIZON " +
-                            "supported in initialPositionInStream(). Please use the initialPosition() from " +
-                            "builder API in KinesisInputDStream for using InitialPositionInStream.AT_TIMESTAMP");
+                    "Only InitialPositionInStream.LATEST and InitialPositionInStream." +
+                            "TRIM_HORIZON supported in initialPositionInStream(). Please use " +
+                            "the initialPosition() from builder API in KinesisInputDStream for " +
+                            "using InitialPositionInStream.AT_TIMESTAMP");
         }
     }
 }

http://git-wip-us.apache.org/repos/asf/spark/blob/5536f318/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java
----------------------------------------------------------------------
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java
index 3ba1808..c120863 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java
@@ -31,7 +31,6 @@ import org.apache.parquet.schema.OriginalType;
 import org.apache.parquet.schema.PrimitiveType;
 
 import org.apache.spark.sql.catalyst.util.DateTimeUtils;
-import org.apache.spark.sql.execution.vectorized.ColumnVector;
 import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
 import org.apache.spark.sql.types.DataTypes;
 import org.apache.spark.sql.types.DecimalType;
@@ -96,7 +95,7 @@ public class VectorizedColumnReader {
   private final OriginalType originalType;
  // The timezone conversion to apply to int96 timestamps. Null if no conversion.
   private final TimeZone convertTz;
-  private final static TimeZone UTC = DateTimeUtils.TimeZoneUTC();
+  private static final TimeZone UTC = DateTimeUtils.TimeZoneUTC();
 
   public VectorizedColumnReader(
       ColumnDescriptor descriptor,

http://git-wip-us.apache.org/repos/asf/spark/blob/5536f318/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java
----------------------------------------------------------------------
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java
index 14f2a58..6c157e8 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java
@@ -79,8 +79,8 @@ public class VectorizedParquetRecordReader extends SpecificParquetRecordReaderBa
   private boolean[] missingColumns;
 
   /**
-   * The timezone that timestamp INT96 values should be converted to. Null if no conversion. Here to workaround
-   * incompatibilities between different engines when writing timestamp values.
+   * The timezone that timestamp INT96 values should be converted to. Null if no conversion. Here to
+   * workaround incompatibilities between different engines when writing timestamp values.
    */
   private TimeZone convertTz = null;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/5536f318/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnarRow.java
----------------------------------------------------------------------
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnarRow.java b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnarRow.java
index 95c0d09..8bb33ed 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnarRow.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnarRow.java
@@ -28,7 +28,8 @@ import org.apache.spark.unsafe.types.UTF8String;
  * to be reused, callers should copy the data out if it needs to be stored.
  */
 public final class ColumnarRow extends InternalRow {
-  // The data for this row. E.g. the value of 3rd int field is `data.getChildColumn(3).getInt(rowId)`.
+  // The data for this row.
+  // E.g. the value of 3rd int field is `data.getChildColumn(3).getInt(rowId)`.
   private final ColumnVector data;
   private final int rowId;
   private final int numFields;

http://git-wip-us.apache.org/repos/asf/spark/blob/5536f318/sql/core/src/main/java/org/apache/spark/sql/sources/v2/SessionConfigSupport.java
----------------------------------------------------------------------
diff --git a/sql/core/src/main/java/org/apache/spark/sql/sources/v2/SessionConfigSupport.java b/sql/core/src/main/java/org/apache/spark/sql/sources/v2/SessionConfigSupport.java
index 0b5b6ac..3cb020d 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/sources/v2/SessionConfigSupport.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/sources/v2/SessionConfigSupport.java
@@ -19,9 +19,6 @@ package org.apache.spark.sql.sources.v2;
 
 import org.apache.spark.annotation.InterfaceStability;
 
-import java.util.List;
-import java.util.Map;
-
 /**
 * A mix-in interface for {@link DataSourceV2}. Data sources can implement this interface to
 * propagate session configs with the specified key-prefix to all data source operations in this

http://git-wip-us.apache.org/repos/asf/spark/blob/5536f318/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/ContinuousReadSupport.java
----------------------------------------------------------------------
diff --git a/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/ContinuousReadSupport.java b/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/ContinuousReadSupport.java
index 8837bae..3136cee 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/ContinuousReadSupport.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/ContinuousReadSupport.java
@@ -39,5 +39,8 @@ public interface ContinuousReadSupport extends DataSourceV2 {
   * @param options the options for the returned data source reader, which is an immutable
   *                case-insensitive string-to-string map.
   */
-  ContinuousReader createContinuousReader(Optional<StructType> schema, String checkpointLocation, DataSourceV2Options options);
+  ContinuousReader createContinuousReader(
+    Optional<StructType> schema,
+    String checkpointLocation,
+    DataSourceV2Options options);
 }

http://git-wip-us.apache.org/repos/asf/spark/blob/5536f318/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/ContinuousWriteSupport.java
----------------------------------------------------------------------
diff --git a/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/ContinuousWriteSupport.java b/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/ContinuousWriteSupport.java
index ec15e43..dee493c 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/ContinuousWriteSupport.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/ContinuousWriteSupport.java
@@ -39,9 +39,9 @@ public interface ContinuousWriteSupport extends BaseStreamingSink {
     * Creates an optional {@link ContinuousWriter} to save the data to this data source. Data
     * sources can return None if there is no writing needed to be done.
     *
-     * @param queryId A unique string for the writing query. It's possible that there are many writing
-     *                queries running at the same time, and the returned {@link DataSourceV2Writer}
-     *                can use this id to distinguish itself from others.
+     * @param queryId A unique string for the writing query. It's possible that there are many
+     *                writing queries running at the same time, and the returned
+     *                {@link DataSourceV2Writer} can use this id to distinguish itself from others.
     * @param schema the schema of the data to be written.
     * @param mode the output mode which determines what successive epoch output means to this
     *             sink, please refer to {@link OutputMode} for more details.

http://git-wip-us.apache.org/repos/asf/spark/blob/5536f318/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/reader/Offset.java
----------------------------------------------------------------------
diff --git a/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/reader/Offset.java b/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/reader/Offset.java
index 517fdab..60b87f2 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/reader/Offset.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/reader/Offset.java
@@ -42,7 +42,8 @@ public abstract class Offset extends org.apache.spark.sql.execution.streaming.Of
     @Override
     public boolean equals(Object obj) {
         if (obj instanceof org.apache.spark.sql.execution.streaming.Offset) {
-            return this.json().equals(((org.apache.spark.sql.execution.streaming.Offset) obj).json());
+            return this.json()
+                .equals(((org.apache.spark.sql.execution.streaming.Offset) obj).json());
         } else {
             return false;
         }

http://git-wip-us.apache.org/repos/asf/spark/blob/5536f318/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/reader/PartitionOffset.java
----------------------------------------------------------------------
diff --git a/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/reader/PartitionOffset.java b/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/reader/PartitionOffset.java
index 729a612..eca0085 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/reader/PartitionOffset.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/reader/PartitionOffset.java
@@ -26,5 +26,4 @@ import java.io.Serializable;
  * These offsets must be serializable.
  */
 public interface PartitionOffset extends Serializable {
-    
 }

http://git-wip-us.apache.org/repos/asf/spark/blob/5536f318/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java
index fd9108e..70c2794 100644
--- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.parse.VariableSubstitution;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
-import org.apache.hadoop.hive.ql.session.OperationLog;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.SerDe;

