Repository: spark
Updated Branches:
  refs/heads/master ec30c1782 -> 6e94c4ead


SPARK-6225 [CORE] [SQL] [STREAMING] Resolve most build warnings, 1.3.0 edition

Resolve javac, scalac warnings of various types -- deprecations, Scala lang, 
unchecked cast, etc.

Author: Sean Owen <so...@cloudera.com>

Closes #4950 from srowen/SPARK-6225 and squashes the following commits:

3080972 [Sean Owen] Ordered imports: Java, Scala, 3rd party, Spark
c67985b [Sean Owen] Resolve javac, scalac warnings of various types -- 
deprecations, Scala lang, unchecked cast, etc.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/6e94c4ea
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/6e94c4ea
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/6e94c4ea

Branch: refs/heads/master
Commit: 6e94c4eadf443ac3d34eaae4c334c8386fdec960
Parents: ec30c17
Author: Sean Owen <so...@cloudera.com>
Authored: Wed Mar 11 13:15:19 2015 +0000
Committer: Sean Owen <so...@cloudera.com>
Committed: Wed Mar 11 13:15:19 2015 +0000

----------------------------------------------------------------------
 .../scala/org/apache/spark/SparkContext.scala   |  2 +-
 .../spark/scheduler/EventLoggingListener.scala  |  2 +-
 .../spark/util/MutableURLClassLoader.scala      |  2 --
 .../OutputCommitCoordinatorSuite.scala          |  2 +-
 .../spark/util/MutableURLClassLoaderSuite.scala |  3 +--
 .../streaming/JavaStatefulNetworkWordCount.java |  1 +
 .../org/apache/spark/examples/HBaseTest.scala   |  4 ++--
 .../kafka/JavaDirectKafkaStreamSuite.java       | 21 ++++++++------------
 .../streaming/kafka/JavaKafkaRDDSuite.java      | 21 ++++++++------------
 .../streaming/kafka/JavaKafkaStreamSuite.java   | 14 ++++++-------
 .../MatrixFactorizationModel.scala              |  8 ++++----
 .../org/apache/spark/sql/sources/ddl.scala      |  1 +
 .../sql/ScalaReflectionRelationSuite.scala      |  2 +-
 .../spark/sql/hive/HiveInspectorSuite.scala     |  2 +-
 .../apache/spark/streaming/JavaAPISuite.java    |  4 ++++
 15 files changed, 40 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/6e94c4ea/core/src/main/scala/org/apache/spark/SparkContext.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala
index 1a0bee4..8121aab 100644
--- a/core/src/main/scala/org/apache/spark/SparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -1104,7 +1104,7 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
       if (!fs.exists(hadoopPath)) {
        throw new FileNotFoundException(s"Added file $hadoopPath does not exist.")
       }
-      val isDir = fs.isDirectory(hadoopPath)
+      val isDir = fs.getFileStatus(hadoopPath).isDir
       if (!isLocal && scheme == "file" && isDir) {
        throw new SparkException(s"addFile does not support local directories when not running " +
           "local mode.")

http://git-wip-us.apache.org/repos/asf/spark/blob/6e94c4ea/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala b/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala
index 2091a9f..34fa6d2 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala
@@ -95,7 +95,7 @@ private[spark] class EventLoggingListener(
    * Creates the log file in the configured log directory.
    */
   def start() {
-    if (!fileSystem.isDirectory(new Path(logBaseDir))) {
+    if (!fileSystem.getFileStatus(new Path(logBaseDir)).isDir) {
      throw new IllegalArgumentException(s"Log directory $logBaseDir does not exist.")
     }
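
Note on this hunk and the matching one in SparkContext.scala above: presumably the warning came
from FileSystem.isDirectory(Path) being flagged @Deprecated in the older Hadoop 1.x API line that
the default Spark build compiled against; fs.getFileStatus(path).isDir returns the same
information without going through the deprecated method.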
 

http://git-wip-us.apache.org/repos/asf/spark/blob/6e94c4ea/core/src/main/scala/org/apache/spark/util/MutableURLClassLoader.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/util/MutableURLClassLoader.scala b/core/src/main/scala/org/apache/spark/util/MutableURLClassLoader.scala
index d9c7103..1e0ba5c 100644
--- a/core/src/main/scala/org/apache/spark/util/MutableURLClassLoader.scala
+++ b/core/src/main/scala/org/apache/spark/util/MutableURLClassLoader.scala
@@ -23,8 +23,6 @@ import java.util.concurrent.ConcurrentHashMap
 
 import scala.collection.JavaConversions._
 
-import org.apache.spark.util.ParentClassLoader
-
 /**
 * URL class loader that exposes the `addURL` and `getURLs` methods in URLClassLoader.
  */

http://git-wip-us.apache.org/repos/asf/spark/blob/6e94c4ea/core/src/test/scala/org/apache/spark/scheduler/OutputCommitCoordinatorSuite.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/scheduler/OutputCommitCoordinatorSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/OutputCommitCoordinatorSuite.scala
index 3cc860c..c8c9578 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/OutputCommitCoordinatorSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/OutputCommitCoordinatorSuite.scala
@@ -153,7 +153,7 @@ class OutputCommitCoordinatorSuite extends FunSuite with BeforeAndAfter {
     def resultHandler(x: Int, y: Unit): Unit = {}
    val futureAction: SimpleFutureAction[Unit] = sc.submitJob[Int, Unit, Unit](rdd,
       OutputCommitFunctions(tempDir.getAbsolutePath).commitSuccessfully,
-      0 until rdd.partitions.size, resultHandler, 0)
+      0 until rdd.partitions.size, resultHandler, () => Unit)
    // It's an error if the job completes successfully even though no committer was authorized,
     // so throw an exception if the job was allowed to complete.
     intercept[TimeoutException] {

http://git-wip-us.apache.org/repos/asf/spark/blob/6e94c4ea/core/src/test/scala/org/apache/spark/util/MutableURLClassLoaderSuite.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/util/MutableURLClassLoaderSuite.scala b/core/src/test/scala/org/apache/spark/util/MutableURLClassLoaderSuite.scala
index 31e3b7e..87de90b 100644
--- a/core/src/test/scala/org/apache/spark/util/MutableURLClassLoaderSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/MutableURLClassLoaderSuite.scala
@@ -21,8 +21,7 @@ import java.net.URLClassLoader
 
 import org.scalatest.FunSuite
 
-import org.apache.spark.{LocalSparkContext, SparkContext, SparkException, TestUtils}
-import org.apache.spark.util.Utils
+import org.apache.spark.{SparkContext, SparkException, TestUtils}
 
 class MutableURLClassLoaderSuite extends FunSuite {
 

http://git-wip-us.apache.org/repos/asf/spark/blob/6e94c4ea/examples/src/main/java/org/apache/spark/examples/streaming/JavaStatefulNetworkWordCount.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaStatefulNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaStatefulNetworkWordCount.java
index d46c710..dbf2ef0 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaStatefulNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaStatefulNetworkWordCount.java
@@ -82,6 +82,7 @@ public class JavaStatefulNetworkWordCount {
     ssc.checkpoint(".");
 
     // Initial RDD input to updateStateByKey
+    @SuppressWarnings("unchecked")
    List<Tuple2<String, Integer>> tuples = Arrays.asList(new Tuple2<String, Integer>("hello", 1),
             new Tuple2<String, Integer>("world", 1));
    JavaPairRDD<String, Integer> initialRDD = ssc.sc().parallelizePairs(tuples);
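
Note on the added annotation: Arrays.asList called with parameterized Tuple2 arguments is a
generic-varargs invocation, so javac reports an "unchecked generic array creation" warning at the
call site. The call is safe here, so @SuppressWarnings("unchecked") on the declaration is the
usual way to silence it; the @SuppressWarnings additions in JavaAPISuite at the end of this patch
address the same kind of warning.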

http://git-wip-us.apache.org/repos/asf/spark/blob/6e94c4ea/examples/src/main/scala/org/apache/spark/examples/HBaseTest.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/HBaseTest.scala b/examples/src/main/scala/org/apache/spark/examples/HBaseTest.scala
index 8226733..f4684b4 100644
--- a/examples/src/main/scala/org/apache/spark/examples/HBaseTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/HBaseTest.scala
@@ -18,7 +18,7 @@
 package org.apache.spark.examples
 
 import org.apache.hadoop.hbase.client.HBaseAdmin
-import org.apache.hadoop.hbase.{HBaseConfiguration, HTableDescriptor}
+import org.apache.hadoop.hbase.{HBaseConfiguration, HTableDescriptor, TableName}
 import org.apache.hadoop.hbase.mapreduce.TableInputFormat
 
 import org.apache.spark._
@@ -36,7 +36,7 @@ object HBaseTest {
     // Initialize hBase table if necessary
     val admin = new HBaseAdmin(conf)
     if (!admin.isTableAvailable(args(0))) {
-      val tableDesc = new HTableDescriptor(args(0))
+      val tableDesc = new HTableDescriptor(TableName.valueOf(args(0)))
       admin.createTable(tableDesc)
     }
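
Note: the HTableDescriptor(String) constructor is deprecated in recent HBase releases in favour of
the TableName-based one, so wrapping args(0) in TableName.valueOf creates the same descriptor
without the deprecation warning.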
 

http://git-wip-us.apache.org/repos/asf/spark/blob/6e94c4ea/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaDirectKafkaStreamSuite.java
----------------------------------------------------------------------
diff --git a/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaDirectKafkaStreamSuite.java b/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaDirectKafkaStreamSuite.java
index 1334cc8..d6ca6d5 100644
--- a/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaDirectKafkaStreamSuite.java
+++ b/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaDirectKafkaStreamSuite.java
@@ -20,32 +20,27 @@ package org.apache.spark.streaming.kafka;
 import java.io.Serializable;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Random;
 import java.util.Arrays;
 
-import org.apache.spark.SparkConf;
-
 import scala.Tuple2;
 
-import junit.framework.Assert;
-
 import kafka.common.TopicAndPartition;
 import kafka.message.MessageAndMetadata;
 import kafka.serializer.StringDecoder;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
 
+import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.function.Function;
-import org.apache.spark.streaming.api.java.JavaDStream;
 import org.apache.spark.streaming.Durations;
+import org.apache.spark.streaming.api.java.JavaDStream;
 import org.apache.spark.streaming.api.java.JavaStreamingContext;
 
-import org.junit.Test;
-import org.junit.After;
-import org.junit.Before;
-
 public class JavaDirectKafkaStreamSuite implements Serializable {
   private transient JavaStreamingContext ssc = null;
-  private transient Random random = new Random();
   private transient KafkaStreamSuiteBase suiteBase = null;
 
   @Before
@@ -93,7 +88,7 @@ public class JavaDirectKafkaStreamSuite implements Serializable {
     ).map(
         new Function<Tuple2<String, String>, String>() {
           @Override
-          public String call(scala.Tuple2<String, String> kv) throws Exception {
+          public String call(Tuple2<String, String> kv) throws Exception {
             return kv._2();
           }
         }
@@ -121,7 +116,7 @@ public class JavaDirectKafkaStreamSuite implements Serializable {
     unifiedStream.foreachRDD(
         new Function<JavaRDD<String>, Void>() {
           @Override
-          public Void call(org.apache.spark.api.java.JavaRDD<String> rdd) throws Exception {
+          public Void call(JavaRDD<String> rdd) throws Exception {
             result.addAll(rdd.collect());
             return null;
           }
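
Note on the three Kafka test suites in this patch: junit.framework.Assert is the old JUnit 3
assertion class and is deprecated in current JUnit 4 releases, hence the switch to
org.junit.Assert. The rest of the churn is the import reordering called out in the commit message
(Java, Scala, third-party, Spark) plus dropping fully-qualified names such as scala.Tuple2 and
org.apache.spark.api.java.JavaRDD where the class is already imported.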

http://git-wip-us.apache.org/repos/asf/spark/blob/6e94c4ea/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaKafkaRDDSuite.java
----------------------------------------------------------------------
diff --git a/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaKafkaRDDSuite.java b/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaKafkaRDDSuite.java
index 9d2e170..4477b81 100644
--- a/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaKafkaRDDSuite.java
+++ b/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaKafkaRDDSuite.java
@@ -19,27 +19,22 @@ package org.apache.spark.streaming.kafka;
 
 import java.io.Serializable;
 import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Arrays;
-
-import org.apache.spark.SparkConf;
 
 import scala.Tuple2;
 
-import junit.framework.Assert;
-
 import kafka.common.TopicAndPartition;
 import kafka.message.MessageAndMetadata;
 import kafka.serializer.StringDecoder;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
 
+import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;
 import org.apache.spark.api.java.function.Function;
 
-import org.junit.Test;
-import org.junit.After;
-import org.junit.Before;
-
 public class JavaKafkaRDDSuite implements Serializable {
   private transient JavaSparkContext sc = null;
   private transient KafkaStreamSuiteBase suiteBase = null;
@@ -78,8 +73,8 @@ public class JavaKafkaRDDSuite implements Serializable {
       OffsetRange.create(topic2, 0, 0, 1)
     };
 
-    HashMap<TopicAndPartition, Broker> emptyLeaders = new HashMap();
-    HashMap<TopicAndPartition, Broker> leaders = new HashMap();
+    HashMap<TopicAndPartition, Broker> emptyLeaders = new HashMap<TopicAndPartition, Broker>();
+    HashMap<TopicAndPartition, Broker> leaders = new HashMap<TopicAndPartition, Broker>();
     String[] hostAndPort = suiteBase.brokerAddress().split(":");
    Broker broker = Broker.create(hostAndPort[0], Integer.parseInt(hostAndPort[1]));
     leaders.put(new TopicAndPartition(topic1, 0), broker);
@@ -96,7 +91,7 @@ public class JavaKafkaRDDSuite implements Serializable {
     ).map(
         new Function<Tuple2<String, String>, String>() {
           @Override
-          public String call(scala.Tuple2<String, String> kv) throws Exception {
+          public String call(Tuple2<String, String> kv) throws Exception {
             return kv._2();
           }
         }
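
Note: new HashMap() without type arguments is a raw type, so assigning it to a
HashMap<TopicAndPartition, Broker> variable draws javac's unchecked-conversion warning. Spelling
out the type arguments on the constructor removes it; the diamond operator (new HashMap<>()) would
be shorter, but presumably the branch still had to compile as Java 6, which lacks it.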

http://git-wip-us.apache.org/repos/asf/spark/blob/6e94c4ea/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaKafkaStreamSuite.java
----------------------------------------------------------------------
diff --git a/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaKafkaStreamSuite.java b/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaKafkaStreamSuite.java
index 208cc51..bad0a93 100644
--- a/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaKafkaStreamSuite.java
+++ b/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaKafkaStreamSuite.java
@@ -22,27 +22,25 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Random;
 
-import org.apache.spark.SparkConf;
-import org.apache.spark.streaming.Duration;
 import scala.Predef;
 import scala.Tuple2;
 import scala.collection.JavaConverters;
 
-import junit.framework.Assert;
-
 import kafka.serializer.StringDecoder;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
 
+import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.JavaPairRDD;
 import org.apache.spark.api.java.function.Function;
 import org.apache.spark.storage.StorageLevel;
+import org.apache.spark.streaming.Duration;
 import org.apache.spark.streaming.api.java.JavaDStream;
 import org.apache.spark.streaming.api.java.JavaPairDStream;
 import org.apache.spark.streaming.api.java.JavaStreamingContext;
 
-import org.junit.Test;
-import org.junit.After;
-import org.junit.Before;
-
 public class JavaKafkaStreamSuite implements Serializable {
   private transient JavaStreamingContext ssc = null;
   private transient Random random = new Random();

http://git-wip-us.apache.org/repos/asf/spark/blob/6e94c4ea/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala b/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala
index c399496..5f5a996 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala
@@ -199,12 +199,12 @@ object MatrixFactorizationModel extends Loader[MatrixFactorizationModel] {
       assert(formatVersion == thisFormatVersion)
       val rank = (metadata \ "rank").extract[Int]
       val userFeatures = sqlContext.parquetFile(userPath(path))
-        .map { case Row(id: Int, features: Seq[Double]) =>
-          (id, features.toArray)
+        .map { case Row(id: Int, features: Seq[_]) =>
+          (id, features.asInstanceOf[Seq[Double]].toArray)
         }
       val productFeatures = sqlContext.parquetFile(productPath(path))
-        .map { case Row(id: Int, features: Seq[Double]) =>
-        (id, features.toArray)
+        .map { case Row(id: Int, features: Seq[_]) =>
+        (id, features.asInstanceOf[Seq[Double]].toArray)
       }
       new MatrixFactorizationModel(rank, userFeatures, productFeatures)
     }
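
Note on the pattern-match change above: because of JVM type erasure, a pattern such as
case Row(id: Int, features: Seq[Double]) cannot actually verify the element type at runtime, so
scalac flags it with an "unchecked" warning. Matching on Seq[_] checks only what the runtime can
check and makes the unavoidable element-type cast explicit. A minimal, self-contained sketch of
the same idea (the value `row` below is hypothetical, not Spark code):

    val row: Any = Seq(1.0, 2.0)

    // case xs: Seq[Double] => ...  // scalac: type argument Double is unchecked,
    //                              // since it is eliminated by erasure
    val features: Array[Double] = row match {
      case xs: Seq[_] => xs.asInstanceOf[Seq[Double]].toArray  // runtime check on Seq only,
                                                                // explicit cast for the elements
    }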

http://git-wip-us.apache.org/repos/asf/spark/blob/6e94c4ea/sql/core/src/main/scala/org/apache/spark/sql/sources/ddl.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/sources/ddl.scala b/sql/core/src/main/scala/org/apache/spark/sql/sources/ddl.scala
index 5020689..76754a6 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/sources/ddl.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/sources/ddl.scala
@@ -17,6 +17,7 @@
 
 package org.apache.spark.sql.sources
 
+import scala.language.existentials
 import scala.language.implicitConversions
 
 import org.apache.spark.Logging
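
Note on the one-line import: scalac emits a feature warning when an existential type
(T forSome { ... }) is written or inferred in a file that neither imports
scala.language.existentials nor enables -language:existentials, so this import acknowledges the
feature and silences the warning, just as the neighbouring implicitConversions import does for
implicit conversions.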

http://git-wip-us.apache.org/repos/asf/spark/blob/6e94c4ea/sql/core/src/test/scala/org/apache/spark/sql/ScalaReflectionRelationSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/ScalaReflectionRelationSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/ScalaReflectionRelationSuite.scala
index 23df6e7..17e923c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/ScalaReflectionRelationSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/ScalaReflectionRelationSuite.scala
@@ -86,7 +86,7 @@ class ScalaReflectionRelationSuite extends FunSuite {
 
     assert(sql("SELECT * FROM reflectData").collect().head ===
       Row("a", 1, 1L, 1.toFloat, 1.toDouble, 1.toShort, 1.toByte, true,
-        new java.math.BigDecimal(1), new Date(70, 0, 1), // This is 1970-01-01
+        new java.math.BigDecimal(1), Date.valueOf("1970-01-01"),
         new Timestamp(12345), Seq(1,2,3)))
   }
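
Note on the Date change (the HiveInspectorSuite fix below is the same substitution): the
three-int java.sql.Date constructor inherits the deprecated java.util.Date semantics, counting
years from 1900 and months from 0, which is what triggered the deprecation warning. Date.valueOf
builds the same date from an ISO string. A small illustrative sketch of the equivalence:

    // new java.sql.Date(70, 0, 1) means year 1900 + 70, month 0 (January), day 1
    val viaDeprecatedCtor = new java.sql.Date(70, 0, 1)           // still compiles, but warns
    val viaValueOf        = java.sql.Date.valueOf("1970-01-01")   // equivalent, warning-free
    assert(viaDeprecatedCtor.toString == viaValueOf.toString)     // both print 1970-01-01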
 

http://git-wip-us.apache.org/repos/asf/spark/blob/6e94c4ea/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala
index 09bbd5c..3181cfe 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala
@@ -75,7 +75,7 @@ class HiveInspectorSuite extends FunSuite with HiveInspectors {
     Literal(0.asInstanceOf[Float]) ::
     Literal(0.asInstanceOf[Double]) ::
     Literal("0") ::
-    Literal(new java.sql.Date(114, 8, 23)) ::
+    Literal(java.sql.Date.valueOf("2014-09-23")) ::
     Literal(Decimal(BigDecimal(123.123))) ::
     Literal(new java.sql.Timestamp(123123)) ::
     Literal(Array[Byte](1,2,3)) ::

http://git-wip-us.apache.org/repos/asf/spark/blob/6e94c4ea/streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java
----------------------------------------------------------------------
diff --git a/streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java b/streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java
index 57302ff..9034075 100644
--- a/streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java
+++ b/streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java
@@ -316,6 +316,7 @@ public class JavaAPISuite extends LocalJavaStreamingContext implements Serializa
     testReduceByWindow(false);
   }
 
+  @SuppressWarnings("unchecked")
   private void testReduceByWindow(boolean withInverse) {
     List<List<Integer>> inputData = Arrays.asList(
         Arrays.asList(1,2,3),
@@ -684,6 +685,7 @@ public class JavaAPISuite extends LocalJavaStreamingContext implements Serializa
     JavaDStream<Long> transformed1 = ssc.transform(
       listOfDStreams1,
       new Function2<List<JavaRDD<?>>, Time, JavaRDD<Long>>() {
+        @Override
         public JavaRDD<Long> call(List<JavaRDD<?>> listOfRDDs, Time time) {
           Assert.assertEquals(2, listOfRDDs.size());
           return null;
@@ -697,6 +699,7 @@ public class JavaAPISuite extends LocalJavaStreamingContext implements Serializa
    JavaPairDStream<Integer, Tuple2<Integer, String>> transformed2 = ssc.transformToPair(
       listOfDStreams2,
      new Function2<List<JavaRDD<?>>, Time, JavaPairRDD<Integer, Tuple2<Integer, String>>>() {
+        @Override
+        public JavaPairRDD<Integer, Tuple2<Integer, String>> call(List<JavaRDD<?>> listOfRDDs, Time time) {
           Assert.assertEquals(3, listOfRDDs.size());
           JavaRDD<Integer> rdd1 = (JavaRDD<Integer>)listOfRDDs.get(0);
@@ -1829,6 +1832,7 @@ public class JavaAPISuite extends LocalJavaStreamingContext implements Serializa
     return expected;
   }
 
+  @SuppressWarnings("unchecked")
  // SPARK-5795: no logic assertions, just testing that intended API invocations compile
   private void compileSaveAsJavaAPI(JavaPairDStream<LongWritable,Text> pds) {
     pds.saveAsNewAPIHadoopFiles(

